from purerouter import PureRouter
from purerouter.types import InferRequest, InvokeRequest

# Initialize the client with your API key.
client = PureRouter(router_key="your-api-key-here")
# Economy profile - cost-optimized routing.
resp_economy = client.router.infer(InferRequest(
    prompt="What is the capital of Brazil?",
    profile="economy",
))
print(resp_economy.output_text)

# Balanced profile - balance between cost and quality.
resp_balanced = client.router.infer(InferRequest(
    prompt="Explain the theory of relativity",
    profile="balanced",
))
print(resp_balanced.output_text)

# Quality profile - prioritizes response quality.
resp_quality = client.router.infer(InferRequest(
    prompt="Write a poem about artificial intelligence",
    profile="quality",
))
print(resp_quality.output_text)
You can also call a specific model directly using its deployment ID:
# Direct call to a specific model, bypassing the router.
response = client.deployments.invoke(
    "ca10db2f-364e-55dc-9d0f-b56e36f1140f",  # Deployment ID
    InvokeRequest(
        messages=[{"role": "user", "content": "Hello, how can I help?"}],
        parameters={"temperature": 0.7},
    ),
)
print(response)
# Getting streaming responses: print each chunk as it arrives.
for chunk in client.router.infer_stream(InferRequest(
    prompt="Tell a long story",
    profile="quality",
)):
    # flush=True forces immediate display of each chunk.
    print(chunk.output_text, end="", flush=True)
try:
    response = client.router.infer(InferRequest(
        prompt="What is the answer to life, the universe, and everything?",
        profile="economy",
    ))
except Exception as e:  # Example-level catch-all; narrow to SDK errors in production.
    print(f"Error making request: {e}")
else:
    # Success path kept out of the try body so it cannot mask errors.
    print(response.output_text)