Official Python SDK for the HINOW AI Inference API
Website • Documentation • Get API Key
Access 100+ AI models through a single unified API. Generate text, images, audio, video, and embeddings with simple API calls.
pip install hinow-ai

from hinow_ai import Hinow
client = Hinow(api_key="hi_xxx")
response = client.chat.completions.create(
model="deepseek-ai/deepseek-v3.2",
messages=[{"role": "user", "content": "Hello!"}]
)
print(response.choices[0].message.content)

- Chat Completions - Text generation with LLMs
- Function Calling - Let AI call your functions
- Vision - Analyze images with AI
- Image Generation - Create images from text
- Text-to-Speech - Convert text to audio
- Speech-to-Text - Transcribe audio to text
- Video Generation - Create videos from text/images
- Embeddings - Generate text embeddings
- Streaming - Real-time response streaming
- Async Support - Full async/await support
response = client.chat.completions.create(
model="deepseek-ai/deepseek-v3.2",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "What is the capital of France?"}
],
temperature=0.7,
max_tokens=1024
)
print(response.choices[0].message.content)

for chunk in client.chat.completions.create(
    model="deepseek-ai/deepseek-v3.2",
    messages=[{"role": "user", "content": "Tell me a story"}],
    stream=True
):
    if chunk.choices[0].delta.get("content"):
        print(chunk.choices[0].delta["content"], end="")

response = client.chat.completions.create(
model="google/gemma-3-27b-it",
messages=[{
"role": "user",
"content": [
{"type": "text", "text": "What's in this image?"},
{"type": "image_url", "image_url": {"url": "https://example.com/image.jpg"}}
]
}]
)

Let the AI call your functions to perform actions or retrieve data.
# 1. Define your tools
tools = [{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City name, e.g. Paris, London"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"]
}
},
"required": ["location"]
}
}
}]
# 2. Send request with tools
response = client.chat.completions.create(
model="deepseek-ai/deepseek-v3.2",
messages=[{"role": "user", "content": "What is the weather in Paris?"}],
tools=tools,
tool_choice="auto"
)
# 3. Handle tool calls
message = response.choices[0].message
if message.tool_calls:
    for tool_call in message.tool_calls:
        args = json.loads(tool_call["function"]["arguments"])
        # Execute your function
        result = get_weather(args["location"], args.get("unit", "celsius"))
        # Send result back
        follow_up = client.chat.completions.create(
            model="deepseek-ai/deepseek-v3.2",
            messages=[
                {"role": "user", "content": "What is the weather in Paris?"},
                {"role": "assistant", "content": "", "tool_calls": message.tool_calls},
                {"role": "tool", "tool_call_id": tool_call["id"], "content": json.dumps(result)}
            ]
        )
        print(follow_up.choices[0].message.content)

response = client.images.generate(
model="black-forest-labs/flux-1-schnell",
prompt="A beautiful sunset over mountains",
aspect_ratio="16:9",
output_format="jpeg"
)
print(response.data[0].url)

response = client.images.edit(
model="stability-ai/sd3-turbo",
prompt="Transform into cyberpunk style",
images=["https://example.com/photo.jpg"],
aspect_ratio="1:1"
)
print(response.data[0].url)

response = client.audio.speech.create(
model="inworld/tts-1.5-max",
input="Hello, welcome to HINOW AI!",
voice_id="Ashley",
voice_gender="female",
language="English",
output_format="mp3"
)
print(response.audio_url)

response = client.audio.transcriptions.create(
model="openai/whisper-large-v3",
audio_url="https://example.com/audio.mp3",
language="en"
)
print(response.text)

response = client.video.generate(
model="runway/gen-3-alpha",
prompt="A rocket launching into space",
aspect_ratio="16:9"
)
print(response.data[0].url)

response = client.embeddings.create(
model="BAAI/bge-base-en-v1.5",
input=["Hello world", "Goodbye world"]
)
# Access embedding vectors
print(len(response.data[0].embedding))  # Vector dimensions

models = client.models.list()
for model in models.data:
    print(f"{model.id}: {model.name}")

model = client.models.retrieve("deepseek-ai/deepseek-v3.2")
print(model.context_window)

balance = client.get_balance()
print(f"Balance: ${balance['balance']}")

import asyncio
from hinow_ai import Hinow
async def main():
    client = Hinow(api_key="hi_xxx")
    response = await client.chat.completions.create_async(
        model="deepseek-ai/deepseek-v3.2",
        messages=[{"role": "user", "content": "Hello!"}]
    )
    print(response.choices[0].message.content)
    await client.aclose()

asyncio.run(main())

client = Hinow(
api_key="hi_xxx", # Required (or set HINOW_API_KEY env var)
base_url="https://api.hinow.ai", # Optional: custom base URL
timeout=120.0, # Optional: request timeout in seconds
max_retries=3, # Optional: max retry attempts
)

export HINOW_API_KEY=hi_xxx
export HINOW_BASE_URL=https://api.hinow.ai  # Optional

# API key will be read from environment
client = Hinow()

from hinow_ai import (
Hinow,
AuthenticationError,
RateLimitError,
InsufficientBalanceError,
InvalidRequestError,
APIError,
)
try:
    response = client.chat.completions.create(...)
except AuthenticationError:
    print("Invalid API key")
except InsufficientBalanceError:
    print("Please add credits to your account")
except RateLimitError:
    print("Rate limited, please retry later")
except InvalidRequestError as e:
    print(f"Bad request: {e.message}")
except APIError as e:
    print(f"API error [{e.status_code}]: {e.message}")

HINOW provides access to 100+ models across multiple providers:
| Category | Example Models |
|---|---|
| Chat/LLM | DeepSeek V3, Gemma 3, Llama 3.3, Qwen 2.5 |
| Image Generation | FLUX, Stable Diffusion 3, DALL-E 3 |
| Text-to-Speech | ElevenLabs, OpenAI TTS, Inworld |
| Speech-to-Text | Whisper Large V3 |
| Video | Runway Gen-3, Kling, Minimax |
| Embeddings | BGE, E5, OpenAI Embeddings |
MIT © HINOW AI