3 changes: 1 addition & 2 deletions CONTRIBUTING.md
@@ -17,8 +17,7 @@ $ rye sync --all-features
You can then run scripts using `rye run python script.py` or by activating the virtual environment:

```sh
$ rye shell
# or manually activate - https://docs.python.org/3/library/venv.html#how-venvs-work
# Activate the virtual environment - https://docs.python.org/3/library/venv.html#how-venvs-work
$ source .venv/bin/activate

# now you can omit the `rye run` prefix
7 changes: 5 additions & 2 deletions pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "llama_api_client"
version = "0.1.1"
version = "0.1.2"
description = "The official Python library for the llama-api-client API"
dynamic = ["readme"]
license = "MIT"
@@ -37,6 +37,8 @@ classifiers = [
Homepage = "https://github.com/meta-llama/llama-api-python"
Repository = "https://github.com/meta-llama/llama-api-python"

[project.optional-dependencies]
aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.6"]

[tool.rye]
managed = true
@@ -54,6 +56,7 @@ dev-dependencies = [
"importlib-metadata>=6.7.0",
"rich>=13.7.1",
"nest_asyncio==1.6.0",
"pytest-xdist>=3.6.1",
]

[tool.rye.scripts]
@@ -125,7 +128,7 @@ replacement = '[\1](https://github.com/meta-llama/llama-api-python/tree/main/\g<

[tool.pytest.ini_options]
testpaths = ["tests"]
addopts = "--tb=short"
addopts = "--tb=short -n auto"
xfail_strict = true
asyncio_mode = "auto"
asyncio_default_fixture_loop_scope = "session"
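Taken together, the pyproject.toml changes for 0.1.2 add an optional `aiohttp` extra (installable via `pip install 'llama-api-client[aiohttp]'`) and parallelize the test suite with pytest-xdist (`-n auto`). A quick, self-contained way to check whether the extra's backend packages are present in an environment — the package names are taken directly from the diff above:

```python
# Minimal sketch: probe for the aiohttp extra's packages without importing them.
# Package names (aiohttp, httpx_aiohttp) come from the optional-dependencies entry above.
import importlib.util

for pkg in ("aiohttp", "httpx_aiohttp"):
    found = importlib.util.find_spec(pkg) is not None
    status = "installed" if found else "missing (pip install 'llama-api-client[aiohttp]')"
    print(f"{pkg}: {status}")
```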
31 changes: 31 additions & 0 deletions requirements-dev.lock
@@ -10,13 +10,24 @@
# universal: false

-e file:.
aiohappyeyeballs==2.6.1
# via aiohttp
aiohttp==3.12.8
# via httpx-aiohttp
# via llama-api-client
aiosignal==1.3.2
# via aiohttp
annotated-types==0.6.0
# via pydantic
anyio==4.4.0
# via httpx
# via llama-api-client
argcomplete==3.1.2
# via nox
async-timeout==5.0.1
# via aiohttp
attrs==25.3.0
# via aiohttp
certifi==2023.7.22
# via httpcore
# via httpx
@@ -30,25 +41,37 @@ distro==1.8.0
exceptiongroup==1.2.2
# via anyio
# via pytest
execnet==2.1.1
# via pytest-xdist
filelock==3.12.4
# via virtualenv
frozenlist==1.6.2
# via aiohttp
# via aiosignal
h11==0.14.0
# via httpcore
httpcore==1.0.2
# via httpx
httpx==0.28.1
# via httpx-aiohttp
# via llama-api-client
# via respx
httpx-aiohttp==0.1.6
# via llama-api-client
idna==3.4
# via anyio
# via httpx
# via yarl
importlib-metadata==7.0.0
iniconfig==2.0.0
# via pytest
markdown-it-py==3.0.0
# via rich
mdurl==0.1.2
# via markdown-it-py
multidict==6.4.4
# via aiohttp
# via yarl
mypy==1.14.1
mypy-extensions==1.0.0
# via mypy
@@ -63,6 +86,9 @@ platformdirs==3.11.0
# via virtualenv
pluggy==1.5.0
# via pytest
propcache==0.3.1
# via aiohttp
# via yarl
pydantic==2.10.3
# via llama-api-client
pydantic-core==2.27.1
@@ -72,7 +98,9 @@ pygments==2.18.0
pyright==1.1.399
pytest==8.3.3
# via pytest-asyncio
# via pytest-xdist
pytest-asyncio==0.24.0
pytest-xdist==3.7.0
python-dateutil==2.8.2
# via time-machine
pytz==2023.3.post1
@@ -94,11 +122,14 @@ tomli==2.0.2
typing-extensions==4.12.2
# via anyio
# via llama-api-client
# via multidict
# via mypy
# via pydantic
# via pydantic-core
# via pyright
virtualenv==20.24.5
# via nox
yarl==1.20.0
# via aiohttp
zipp==3.17.0
# via importlib-metadata
27 changes: 27 additions & 0 deletions requirements.lock
@@ -10,27 +10,51 @@
# universal: false

-e file:.
aiohappyeyeballs==2.6.1
# via aiohttp
aiohttp==3.12.8
# via httpx-aiohttp
# via llama-api-client
aiosignal==1.3.2
# via aiohttp
annotated-types==0.6.0
# via pydantic
anyio==4.4.0
# via httpx
# via llama-api-client
async-timeout==5.0.1
# via aiohttp
attrs==25.3.0
# via aiohttp
certifi==2023.7.22
# via httpcore
# via httpx
distro==1.8.0
# via llama-api-client
exceptiongroup==1.2.2
# via anyio
frozenlist==1.6.2
# via aiohttp
# via aiosignal
h11==0.14.0
# via httpcore
httpcore==1.0.2
# via httpx
httpx==0.28.1
# via httpx-aiohttp
# via llama-api-client
httpx-aiohttp==0.1.6
# via llama-api-client
idna==3.4
# via anyio
# via httpx
# via yarl
multidict==6.4.4
# via aiohttp
# via yarl
propcache==0.3.1
# via aiohttp
# via yarl
pydantic==2.10.3
# via llama-api-client
pydantic-core==2.27.1
@@ -41,5 +65,8 @@ sniffio==1.3.0
typing-extensions==4.12.2
# via anyio
# via llama-api-client
# via multidict
# via pydantic
# via pydantic-core
yarl==1.20.0
# via aiohttp
3 changes: 2 additions & 1 deletion src/llama_api_client/__init__.py
@@ -36,7 +36,7 @@
UnprocessableEntityError,
APIResponseValidationError,
)
from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient
from ._base_client import DefaultHttpxClient, DefaultAioHttpClient, DefaultAsyncHttpxClient
from ._utils._logs import setup_logging as _setup_logging

__all__ = [
@@ -78,6 +78,7 @@
"DEFAULT_CONNECTION_LIMITS",
"DefaultHttpxClient",
"DefaultAsyncHttpxClient",
"DefaultAioHttpClient",
]

if not _t.TYPE_CHECKING:
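With `DefaultAioHttpClient` now exported from the package root, opting into the aiohttp transport is a one-liner at client construction. A minimal usage sketch — the async client name (`AsyncLlamaAPIClient`), the `http_client` parameter, and context-manager support are assumptions based on the `http_client` docstrings later in this diff, not shown here directly; `models.list()` is taken from the tests below:

```python
# Sketch only: assumes the async client is named AsyncLlamaAPIClient, accepts
# an `http_client` argument, and supports `async with`. Requires the `aiohttp`
# extra and an API key in the environment.
import asyncio

from llama_api_client import AsyncLlamaAPIClient, DefaultAioHttpClient


async def main() -> None:
    async with AsyncLlamaAPIClient(http_client=DefaultAioHttpClient()) as client:
        models = await client.models.list()
        print(models)


asyncio.run(main())
```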
46 changes: 44 additions & 2 deletions src/llama_api_client/_base_client.py
@@ -960,6 +960,9 @@ def request(
if self.custom_auth is not None:
kwargs["auth"] = self.custom_auth

if options.follow_redirects is not None:
kwargs["follow_redirects"] = options.follow_redirects

log.debug("Sending HTTP Request: %s %s", request.method, request.url)

response = None
@@ -1068,7 +1071,14 @@ def _process_response(
) -> ResponseT:
origin = get_origin(cast_to) or cast_to

if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse):
if (
inspect.isclass(origin)
and issubclass(origin, BaseAPIResponse)
# we only want to return the custom BaseAPIResponse class if we're returning
# the raw response or if we're not streaming SSE; when streaming SSE,
# `cast_to` doesn't reflect the type we need to parse into
and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER)))
):
if not issubclass(origin, APIResponse):
raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}")

@@ -1279,6 +1289,24 @@ def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)


try:
import httpx_aiohttp
except ImportError:

class _DefaultAioHttpClient(httpx.AsyncClient):
def __init__(self, **_kwargs: Any) -> None:
raise RuntimeError("To use the aiohttp client you must have installed the package with the `aiohttp` extra")
else:

class _DefaultAioHttpClient(httpx_aiohttp.HttpxAiohttpClient): # type: ignore
def __init__(self, **kwargs: Any) -> None:
kwargs.setdefault("timeout", DEFAULT_TIMEOUT)
kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS)
kwargs.setdefault("follow_redirects", True)

super().__init__(**kwargs)


if TYPE_CHECKING:
DefaultAsyncHttpxClient = httpx.AsyncClient
"""An alias to `httpx.AsyncClient` that provides the same defaults that this SDK
@@ -1287,8 +1315,12 @@ def __init__(self, **kwargs: Any) -> None:
This is useful because overriding the `http_client` with your own instance of
`httpx.AsyncClient` will result in httpx's defaults being used, not ours.
"""

DefaultAioHttpClient = httpx.AsyncClient
"""An alias to `httpx.AsyncClient` that changes the default HTTP transport to `aiohttp`."""
else:
DefaultAsyncHttpxClient = _DefaultAsyncHttpxClient
DefaultAioHttpClient = _DefaultAioHttpClient


class AsyncHttpxClientWrapper(DefaultAsyncHttpxClient):
@@ -1460,6 +1492,9 @@ async def request(
if self.custom_auth is not None:
kwargs["auth"] = self.custom_auth

if options.follow_redirects is not None:
kwargs["follow_redirects"] = options.follow_redirects

log.debug("Sending HTTP Request: %s %s", request.method, request.url)

response = None
@@ -1568,7 +1603,14 @@ async def _process_response(
) -> ResponseT:
origin = get_origin(cast_to) or cast_to

if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse):
if (
inspect.isclass(origin)
and issubclass(origin, BaseAPIResponse)
# we only want to return the custom BaseAPIResponse class if we're returning
# the raw response or if we're not streaming SSE; when streaming SSE,
# `cast_to` doesn't reflect the type we need to parse into
and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER)))
):
if not issubclass(origin, AsyncAPIResponse):
raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}")

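Note the try/except import guard around `httpx_aiohttp`: it keeps `DefaultAioHttpClient` importable whether or not the extra is installed, deferring the failure from import time to construction time. A sketch of the observable behavior without the extra, based on the except-branch class above:

```python
# The import always succeeds; the missing optional dependency only surfaces
# as a RuntimeError when the client is constructed (per the fallback class
# defined in the except branch above).
from llama_api_client import DefaultAioHttpClient

try:
    client = DefaultAioHttpClient()
except RuntimeError as exc:
    print(f"aiohttp extra not installed: {exc}")
```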
2 changes: 2 additions & 0 deletions src/llama_api_client/_models.py
@@ -737,6 +737,7 @@ class FinalRequestOptionsInput(TypedDict, total=False):
idempotency_key: str
json_data: Body
extra_json: AnyMapping
follow_redirects: bool


@final
@@ -750,6 +751,7 @@ class FinalRequestOptions(pydantic.BaseModel):
files: Union[HttpxRequestFiles, None] = None
idempotency_key: Union[str, None] = None
post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven()
follow_redirects: Union[bool, None] = None

# It should be noted that we cannot use `json` here as that would override
# a BaseModel method in an incompatible fashion.
2 changes: 2 additions & 0 deletions src/llama_api_client/_types.py
@@ -100,6 +100,7 @@ class RequestOptions(TypedDict, total=False):
params: Query
extra_json: AnyMapping
idempotency_key: str
follow_redirects: bool


# Sentinel class used until PEP 0661 is accepted
@@ -215,3 +216,4 @@ class _GenericAlias(Protocol):

class HttpxSendArgs(TypedDict, total=False):
auth: httpx.Auth
follow_redirects: bool
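
The `follow_redirects` key lands in both `FinalRequestOptions` (defaulting to `None`) and the `total=False` TypedDicts above. With `total=False`, an unset key is simply absent, so the request path can forward the option only when the caller actually provided one — matching the `if options.follow_redirects is not None` guards in `_base_client.py`. A self-contained sketch of that pattern:

```python
# Sketch of the total=False TypedDict pattern used by HttpxSendArgs: keys left
# unset are absent from the dict, so `**send_args` forwards only what the
# caller provided and httpx's client-level defaults apply otherwise.
from typing import Optional, TypedDict

import httpx


class SendArgs(TypedDict, total=False):
    auth: httpx.Auth
    follow_redirects: bool


follow_redirects: Optional[bool] = None  # e.g. taken from FinalRequestOptions
send_args: SendArgs = {}
if follow_redirects is not None:
    send_args["follow_redirects"] = follow_redirects

print(send_args)  # {} -- the kwarg is omitted entirely, not passed as None
```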
2 changes: 1 addition & 1 deletion src/llama_api_client/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.

__title__ = "llama_api_client"
__version__ = "0.1.1"
__version__ = "0.1.2"
4 changes: 3 additions & 1 deletion tests/api_resources/chat/test_completions.py
@@ -205,7 +205,9 @@ def test_streaming_response_create_overload_2(self, client: LlamaAPIClient) -> N


class TestAsyncCompletions:
parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)

@pytest.mark.skip()
@parametrize
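Each async test class (here and in the two files below) gains a third, dict-valued parameter, `{"http_client": "aiohttp"}`, delivered to the `async_client` fixture via `indirect=True`. The fixture itself lives in the repo's conftest.py, which this diff does not touch, so the following is only a hypothetical sketch of how such a fixture could interpret the three parameter values — the client name, base URL, credential, and strict-validation flag are all assumptions:

```python
# Hypothetical fixture sketch (the real one is in conftest.py, not this diff).
# Booleans toggle strict response validation ("loose"/"strict" ids); the dict
# opts into the aiohttp-backed transport ("aiohttp" id).
import pytest

from llama_api_client import AsyncLlamaAPIClient, DefaultAioHttpClient


@pytest.fixture
async def async_client(request: pytest.FixtureRequest):
    param = request.param
    use_aiohttp = isinstance(param, dict) and param.get("http_client") == "aiohttp"
    strict = param if isinstance(param, bool) else True
    async with AsyncLlamaAPIClient(
        base_url="http://127.0.0.1:4010",  # assumed local mock-server URL
        api_key="my-api-key",  # placeholder credential
        http_client=DefaultAioHttpClient() if use_aiohttp else None,
        _strict_response_validation=strict,  # assumed flag name
    ) as client:
        yield client
```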
4 changes: 3 additions & 1 deletion tests/api_resources/test_models.py
@@ -89,7 +89,9 @@ def test_streaming_response_list(self, client: LlamaAPIClient) -> None:


class TestAsyncModels:
parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)

@pytest.mark.skip()
@parametrize
4 changes: 3 additions & 1 deletion tests/api_resources/test_moderations.py
@@ -82,7 +82,9 @@ def test_streaming_response_create(self, client: LlamaAPIClient) -> None:


class TestAsyncModerations:
parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
parametrize = pytest.mark.parametrize(
"async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
)

@pytest.mark.skip()
@parametrize