Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### New features

* Added `ChatDeepSeek()` for chatting via [DeepSeek](https://www.deepseek.com/).
* Added `ChatDeepSeek()` for chatting via [DeepSeek](https://www.deepseek.com/). (#147)
* Added `ChatOpenRouter()` for chatting via [OpenRouter](https://openrouter.ai/). (#148)
* Added `ChatHuggingFace()` for chatting via [Hugging Face](https://huggingface.co/). (#144)
* Added `ChatPortkey()` for chatting via [Portkey AI](https://portkey.ai/). (#143)

Expand Down
2 changes: 2 additions & 0 deletions chatlas/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
from ._provider_huggingface import ChatHuggingFace
from ._provider_ollama import ChatOllama
from ._provider_openai import ChatAzureOpenAI, ChatOpenAI
from ._provider_openrouter import ChatOpenRouter
from ._provider_perplexity import ChatPerplexity
from ._provider_portkey import ChatPortkey
from ._provider_snowflake import ChatSnowflake
Expand All @@ -39,6 +40,7 @@
"ChatHuggingFace",
"ChatOllama",
"ChatOpenAI",
"ChatOpenRouter",
"ChatAzureOpenAI",
"ChatPerplexity",
"ChatPortkey",
Expand Down
149 changes: 149 additions & 0 deletions chatlas/_provider_openrouter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,149 @@
from __future__ import annotations

import os
from typing import TYPE_CHECKING, Optional

from ._chat import Chat
from ._logging import log_model_default
from ._provider_openai import OpenAIProvider
from ._utils import MISSING, MISSING_TYPE, is_testing

if TYPE_CHECKING:
from ._provider_openai import ChatCompletion
from .types.openai import ChatClientArgs, SubmitInputArgs


def ChatOpenRouter(
    *,
    system_prompt: Optional[str] = None,
    model: Optional[str] = None,
    api_key: Optional[str] = None,
    base_url: str = "https://openrouter.ai/api/v1",
    seed: Optional[int] | MISSING_TYPE = MISSING,
    kwargs: Optional["ChatClientArgs"] = None,
) -> Chat["SubmitInputArgs", ChatCompletion]:
    """
    Chat with one of the many models hosted on OpenRouter.

    OpenRouter provides access to a wide variety of language models from different providers
    through a unified, OpenAI-compatible API. Support for features depends on the underlying
    model that you use.

    Prerequisites
    -------------

    ::: {.callout-note}
    ## API key

    Sign up at <https://openrouter.ai> to get an API key.
    :::

    Examples
    --------

    ```python
    import os
    from chatlas import ChatOpenRouter

    chat = ChatOpenRouter(api_key=os.getenv("OPENROUTER_API_KEY"))
    chat.chat("What is the capital of France?")
    ```

    Parameters
    ----------
    system_prompt
        A system prompt to set the behavior of the assistant.
    model
        The model to use for the chat. The default, None, will pick a reasonable
        default, and warn you about it. We strongly recommend explicitly choosing
        a model for all but the most casual use. See <https://openrouter.ai/models>
        for available models. Note that OpenRouter model IDs are namespaced as
        `"<provider>/<model>"` (e.g., `"openai/gpt-4.1"`).
    api_key
        The API key to use for authentication. You generally should not supply
        this directly, but instead set the `OPENROUTER_API_KEY` environment variable.
    base_url
        The base URL to the endpoint; the default uses OpenRouter's API.
    seed
        Optional integer seed that the model uses to try and make output more
        reproducible.
    kwargs
        Additional arguments to pass to the `openai.OpenAI()` client constructor.

    Returns
    -------
    Chat
        A chat object that retains the state of the conversation.

    Note
    ----
    This function is a lightweight wrapper around [](`~chatlas.ChatOpenAI`) with
    the defaults tweaked for OpenRouter.

    Note
    ----
    Pasting an API key into a chat constructor (e.g., `ChatOpenRouter(api_key="...")`)
    is the simplest way to get started, and is fine for interactive use, but is
    problematic for code that may be shared with others.

    Instead, consider using environment variables or a configuration file to manage
    your credentials. One popular way to manage credentials is to use a `.env` file
    to store your credentials, and then use the `python-dotenv` package to load them
    into your environment.

    ```shell
    pip install python-dotenv
    ```

    ```shell
    # .env
    OPENROUTER_API_KEY=...
    ```

    ```python
    from chatlas import ChatOpenRouter
    from dotenv import load_dotenv

    load_dotenv()
    chat = ChatOpenRouter()
    chat.console()
    ```

    Another, more general, solution is to load your environment variables into the shell
    before starting Python (maybe in a `.bashrc`, `.zshrc`, etc. file):

    ```shell
    export OPENROUTER_API_KEY=...
    ```
    """
    if model is None:
        # OpenRouter model IDs are namespaced as "<provider>/<model>"; a bare
        # "gpt-4.1" is not a valid OpenRouter slug (the tests in this repo use
        # "openai/gpt-4o-mini-..."), so qualify the default with its provider.
        model = log_model_default("openai/gpt-4.1")

    if api_key is None:
        api_key = os.getenv("OPENROUTER_API_KEY")

    if isinstance(seed, MISSING_TYPE):
        # Pin a fixed seed under test runs so responses are as reproducible
        # as the model allows; otherwise leave seeding to the provider.
        seed = 1014 if is_testing() else None

    # Merge OpenRouter's recommended attribution headers into the client kwargs
    # (user-supplied headers win) without mutating the caller's dict.
    kwargs2 = add_default_headers(kwargs or {})

    return Chat(
        provider=OpenAIProvider(
            api_key=api_key,
            model=model,
            base_url=base_url,
            seed=seed,
            name="OpenRouter",
            kwargs=kwargs2,
        ),
        system_prompt=system_prompt,
    )


def add_default_headers(kwargs: "ChatClientArgs") -> "ChatClientArgs":
    """
    Return a copy of `kwargs` with OpenRouter's recommended attribution
    headers merged into its `default_headers` entry.

    User-supplied headers take precedence over the defaults; all other
    entries in `kwargs` are preserved unchanged. The input is not mutated.
    """
    headers = kwargs.get("default_headers", None)
    # https://openrouter.ai/docs/api-keys
    default_headers = {
        "HTTP-Referer": "https://posit-dev.github.io/chatlas",
        "X-Title": "chatlas",
        **(headers or {}),
    }
    # Spread kwargs FIRST: spreading it last would let a caller-supplied
    # "default_headers" key clobber the merged mapping built above, silently
    # dropping the attribution headers.
    return {**kwargs, "default_headers": default_headers}
1 change: 1 addition & 0 deletions docs/_quarto.yml
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,7 @@ quartodoc:
- ChatHuggingFace
- ChatOllama
- ChatOpenAI
- ChatOpenRouter
- ChatPerplexity
- ChatPortkey
- ChatSnowflake
Expand Down
3 changes: 2 additions & 1 deletion docs/get-started/models.qmd
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,8 @@ To see the pre-requisites for a given provider, visit the relevant usage page in
| Google (Gemini) | [`ChatGoogle()`](../reference/ChatGoogle.qmd) | |
| Groq | [`ChatGroq()`](../reference/ChatGroq.qmd) | |
| Hugging Face | [`ChatHuggingFace()`](../reference/ChatHuggingFace.qmd) | |
| Ollama local models | [`ChatOllama()`](../reference/ChatOllama.qmd) | |
| Ollama (local models) | [`ChatOllama()`](../reference/ChatOllama.qmd) | |
| OpenRouter | [`ChatOpenRouter()`](../reference/ChatOpenRouter.qmd) | |
| OpenAI | [`ChatOpenAI()`](../reference/ChatOpenAI.qmd) | |
| perplexity.ai | [`ChatPerplexity()`](../reference/ChatPerplexity.qmd) | |
| AWS Bedrock | [`ChatBedrockAnthropic()`](../reference/ChatBedrockAnthropic.qmd) | ✅ |
Expand Down
68 changes: 68 additions & 0 deletions tests/test_provider_openrouter.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
import os

import pytest
from chatlas import ChatOpenRouter

from .conftest import (
assert_data_extraction,
assert_images_inline,
assert_images_remote,
assert_tools_simple,
)

# These are live-API tests: without OpenRouter credentials, skip the module.
api_key = os.environ.get("OPENROUTER_API_KEY")
if api_key is None:
    pytest.skip(
        "OPENROUTER_API_KEY is not set; skipping tests",
        allow_module_level=True,
    )


def test_openrouter_simple_request():
    """Smoke-test a basic, non-streaming round trip through OpenRouter."""
    chat = ChatOpenRouter(
        system_prompt="Be as terse as possible; no punctuation",
        model="openai/gpt-4o-mini-2024-07-18",
    )
    chat.chat("What is 1 + 1?")
    last = chat.get_last_turn()
    assert last is not None
    tokens = last.tokens
    assert tokens is not None
    assert len(tokens) == 3
    assert tokens[0] >= 1
    assert last.finish_reason == "stop"


@pytest.mark.asyncio
async def test_openrouter_simple_streaming_request():
    """Smoke-test an async streaming round trip through OpenRouter."""
    chat = ChatOpenRouter(
        system_prompt="Be as terse as possible; no punctuation",
        model="openai/gpt-4o-mini-2024-07-18",
    )
    chunks = [chunk async for chunk in await chat.stream_async("What is 1 + 1?")]
    assert "2" in "".join(chunks)
    last = chat.get_last_turn()
    assert last is not None
    assert last.finish_reason == "stop"


def test_openrouter_tool_variations():
    """Run the shared tool-calling checks against an OpenRouter-hosted model."""
    assert_tools_simple(
        lambda **kwargs: ChatOpenRouter(
            model="openai/gpt-4o-mini-2024-07-18", **kwargs
        )
    )


def test_data_extraction():
    """Run the shared structured-data-extraction checks via OpenRouter."""
    assert_data_extraction(
        lambda **kwargs: ChatOpenRouter(
            model="openai/gpt-4o-mini-2024-07-18", **kwargs
        )
    )


def test_openrouter_images():
    """Run the shared inline- and remote-image checks via OpenRouter."""

    def make_chat(**kwargs):
        return ChatOpenRouter(model="openai/gpt-4o-mini-2024-07-18", **kwargs)

    assert_images_inline(make_chat)
    assert_images_remote(make_chat)
Loading