diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 81f6dc2..27e937e 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,17 +1,23 @@
name: CI
on:
push:
- branches:
- - main
+ branches-ignore:
+ - 'generated'
+ - 'codegen/**'
+ - 'integrated/**'
+ - 'stl-preview-head/**'
+ - 'stl-preview-base/**'
pull_request:
- branches:
- - main
- - next
+ branches-ignore:
+ - 'stl-preview-head/**'
+ - 'stl-preview-base/**'
jobs:
lint:
+ timeout-minutes: 10
name: lint
- runs-on: ubuntu-latest
+ runs-on: ${{ github.repository == 'stainless-sdks/agility-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
steps:
- uses: actions/checkout@v4
@@ -29,9 +35,49 @@ jobs:
- name: Run lints
run: ./scripts/lint
+ build:
+ if: github.repository == 'stainless-sdks/agility-python' && (github.event_name == 'push' || github.event.pull_request.head.repo.fork)
+ timeout-minutes: 10
+ name: build
+ permissions:
+ contents: read
+ id-token: write
+ runs-on: depot-ubuntu-24.04
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Install Rye
+ run: |
+ curl -sSf https://rye.astral.sh/get | bash
+ echo "$HOME/.rye/shims" >> $GITHUB_PATH
+ env:
+ RYE_VERSION: '0.44.0'
+ RYE_INSTALL_OPTION: '--yes'
+
+ - name: Install dependencies
+ run: rye sync --all-features
+
+ - name: Run build
+ run: rye build
+
+ - name: Get GitHub OIDC Token
+ id: github-oidc
+ uses: actions/github-script@v6
+ with:
+ script: core.setOutput('github_token', await core.getIDToken());
+
+ - name: Upload tarball
+ env:
+ URL: https://pkg.stainless.com/s
+ AUTH: ${{ steps.github-oidc.outputs.github_token }}
+ SHA: ${{ github.sha }}
+ run: ./scripts/utils/upload-artifact.sh
+
test:
+ timeout-minutes: 10
name: test
- runs-on: ubuntu-latest
+ runs-on: ${{ github.repository == 'stainless-sdks/agility-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.fork
steps:
- uses: actions/checkout@v4
diff --git a/.gitignore b/.gitignore
index 8779740..95ceb18 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,4 @@
.prism.log
-.vscode
_dev
__pycache__
diff --git a/.stats.yml b/.stats.yml
index 57c3565..29213f9 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 42
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/cleanlab%2Fagility-bb543bebe38d4cc889a3fa1ebc212458cd4321233d904357be98f8b22db82960.yml
-openapi_spec_hash: 7a30a005e382a8db9fafa55903c3a977
-config_hash: 6d2156cfe279456cf3c35ba5c66be1c1
+configured_endpoints: 43
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/cleanlab%2Fagility-9e6009a931b636947a7410707ffb4db68f9a00d6f7e3ec6e55c365f883c50223.yml
+openapi_spec_hash: 264626d871113465d14672d73e910c03
+config_hash: 58f3e6b15392ca51b942e41597d56e7f
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..5b01030
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,3 @@
+{
+  "python.analysis.importFormat": "relative"
+}
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ee8eff6..98455be 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -17,8 +17,7 @@ $ rye sync --all-features
You can then run scripts using `rye run python script.py` or by activating the virtual environment:
```sh
-$ rye shell
-# or manually activate - https://docs.python.org/3/library/venv.html#how-venvs-work
+# Activate the virtual environment - https://docs.python.org/3/library/venv.html#how-venvs-work
$ source .venv/bin/activate
# now you can omit the `rye run` prefix
diff --git a/SECURITY.md b/SECURITY.md
index 9c3e857..5ec6f47 100644
--- a/SECURITY.md
+++ b/SECURITY.md
@@ -16,11 +16,11 @@ before making any information public.
## Reporting Non-SDK Related Security Issues
If you encounter security issues that are not directly related to SDKs but pertain to the services
-or products provided by Agility please follow the respective company's security reporting guidelines.
+or products provided by Agility, please follow the respective company's security reporting guidelines.
### Agility Terms and Policies
-Please contact dev-feedback@agility.com for any questions or concerns regarding security of our services.
+Please contact dev-feedback@agility.com for any questions or concerns regarding the security of our services.
---
diff --git a/api.md b/api.md
index 7b79cef..921bd9a 100644
--- a/api.md
+++ b/api.md
@@ -3,7 +3,12 @@
Types:
```python
-from agility.types import Assistant, AssistantWithConfig, AssistantListResponse
+from agility.types import (
+ Assistant,
+ AssistantWithConfig,
+ AssistantListResponse,
+ AssistantRetrieveRunMetadataResponse,
+)
```
Methods:
@@ -13,6 +18,7 @@ Methods:
- client.assistants.update(assistant_id, \*\*params) -> AssistantWithConfig
- client.assistants.list(\*\*params) -> SyncMyOffsetPage[AssistantListResponse]
- client.assistants.delete(assistant_id) -> None
+- client.assistants.retrieve_run_metadata(run_id, \*, assistant_id) -> AssistantRetrieveRunMetadataResponse
## AccessKeys
@@ -48,7 +54,7 @@ Methods:
Types:
```python
-from agility.types.knowledge_bases import Source, SourceStatusResponse, SourceSyncResponse
+from agility.types.knowledge_bases import Source, SourceStatusResponse
```
Methods:
@@ -59,7 +65,7 @@ Methods:
- client.knowledge_bases.sources.list(knowledge_base_id, \*\*params) -> SyncMyOffsetPage[Source]
- client.knowledge_bases.sources.delete(source_id, \*, knowledge_base_id) -> None
- client.knowledge_bases.sources.status(source_id, \*, knowledge_base_id) -> SourceStatusResponse
-- client.knowledge_bases.sources.sync(source_id, \*, knowledge_base_id) -> object
+- client.knowledge_bases.sources.sync(source_id, \*, knowledge_base_id) -> object
### Documents
@@ -134,7 +140,7 @@ Methods:
Types:
```python
-from agility.types.threads import Run, RunStreamResponse
+from agility.types.threads import Run
```
Methods:
@@ -142,7 +148,7 @@ Methods:
- client.threads.runs.create(thread_id, \*\*params) -> Run
- client.threads.runs.retrieve(run_id, \*, thread_id) -> Run
- client.threads.runs.delete(run_id, \*, thread_id) -> None
-- client.threads.runs.stream(thread_id, \*\*params) -> object
+- client.threads.runs.stream(thread_id, \*\*params) -> object
# Integrations
diff --git a/pyproject.toml b/pyproject.toml
index 2e3362e..a386b01 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -24,6 +24,7 @@ classifiers = [
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
+ "Programming Language :: Python :: 3.13",
"Operating System :: OS Independent",
"Operating System :: POSIX",
"Operating System :: MacOS",
@@ -37,12 +38,14 @@ classifiers = [
Homepage = "https://github.com/stainless-sdks/agility-python"
Repository = "https://github.com/stainless-sdks/agility-python"
+[project.optional-dependencies]
+aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.8"]
[tool.rye]
managed = true
# version pins are in requirements-dev.lock
dev-dependencies = [
- "pyright>=1.1.359",
+ "pyright==1.1.399",
"mypy",
"respx",
"pytest",
@@ -54,6 +57,7 @@ dev-dependencies = [
"importlib-metadata>=6.7.0",
"rich>=13.7.1",
"nest_asyncio==1.6.0",
+ "pytest-xdist>=3.6.1",
]
[tool.rye.scripts]
@@ -125,7 +129,7 @@ replacement = '[\1](https://github.com/stainless-sdks/agility-python/tree/main/\
[tool.pytest.ini_options]
testpaths = ["tests"]
-addopts = "--tb=short"
+addopts = "--tb=short -n auto"
xfail_strict = true
asyncio_mode = "auto"
asyncio_default_fixture_loop_scope = "session"
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 2ecab23..d71d5f6 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -10,6 +10,13 @@
# universal: false
-e file:.
+aiohappyeyeballs==2.6.1
+ # via aiohttp
+aiohttp==3.12.8
+ # via agility
+ # via httpx-aiohttp
+aiosignal==1.3.2
+ # via aiohttp
annotated-types==0.6.0
# via pydantic
anyio==4.4.0
@@ -17,6 +24,10 @@ anyio==4.4.0
# via httpx
argcomplete==3.1.2
# via nox
+async-timeout==5.0.1
+ # via aiohttp
+attrs==25.3.0
+ # via aiohttp
certifi==2023.7.22
# via httpcore
# via httpx
@@ -30,18 +41,27 @@ distro==1.8.0
exceptiongroup==1.2.2
# via anyio
# via pytest
+execnet==2.1.1
+ # via pytest-xdist
filelock==3.12.4
# via virtualenv
-h11==0.14.0
+frozenlist==1.6.2
+ # via aiohttp
+ # via aiosignal
+h11==0.16.0
# via httpcore
-httpcore==1.0.2
+httpcore==1.0.9
# via httpx
httpx==0.28.1
# via agility
+ # via httpx-aiohttp
# via respx
+httpx-aiohttp==0.1.8
+ # via agility
idna==3.4
# via anyio
# via httpx
+ # via yarl
importlib-metadata==7.0.0
iniconfig==2.0.0
# via pytest
@@ -49,6 +69,9 @@ markdown-it-py==3.0.0
# via rich
mdurl==0.1.2
# via markdown-it-py
+multidict==6.4.4
+ # via aiohttp
+ # via yarl
mypy==1.14.1
mypy-extensions==1.0.0
# via mypy
@@ -63,16 +86,21 @@ platformdirs==3.11.0
# via virtualenv
pluggy==1.5.0
# via pytest
+propcache==0.3.1
+ # via aiohttp
+ # via yarl
pydantic==2.10.3
# via agility
pydantic-core==2.27.1
# via pydantic
pygments==2.18.0
# via rich
-pyright==1.1.392.post0
+pyright==1.1.399
pytest==8.3.3
# via pytest-asyncio
+ # via pytest-xdist
pytest-asyncio==0.24.0
+pytest-xdist==3.7.0
python-dateutil==2.8.2
# via time-machine
pytz==2023.3.post1
@@ -94,11 +122,14 @@ tomli==2.0.2
typing-extensions==4.12.2
# via agility
# via anyio
+ # via multidict
# via mypy
# via pydantic
# via pydantic-core
# via pyright
virtualenv==20.24.5
# via nox
+yarl==1.20.0
+ # via aiohttp
zipp==3.17.0
# via importlib-metadata
diff --git a/requirements.lock b/requirements.lock
index 8f0aee0..cff9a9a 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -10,11 +10,22 @@
# universal: false
-e file:.
+aiohappyeyeballs==2.6.1
+ # via aiohttp
+aiohttp==3.12.8
+ # via agility
+ # via httpx-aiohttp
+aiosignal==1.3.2
+ # via aiohttp
annotated-types==0.6.0
# via pydantic
anyio==4.4.0
# via agility
# via httpx
+async-timeout==5.0.1
+ # via aiohttp
+attrs==25.3.0
+ # via aiohttp
certifi==2023.7.22
# via httpcore
# via httpx
@@ -22,15 +33,28 @@ distro==1.8.0
# via agility
exceptiongroup==1.2.2
# via anyio
-h11==0.14.0
+frozenlist==1.6.2
+ # via aiohttp
+ # via aiosignal
+h11==0.16.0
# via httpcore
-httpcore==1.0.2
+httpcore==1.0.9
# via httpx
httpx==0.28.1
# via agility
+ # via httpx-aiohttp
+httpx-aiohttp==0.1.8
+ # via agility
idna==3.4
# via anyio
# via httpx
+ # via yarl
+multidict==6.4.4
+ # via aiohttp
+ # via yarl
+propcache==0.3.1
+ # via aiohttp
+ # via yarl
pydantic==2.10.3
# via agility
pydantic-core==2.27.1
@@ -41,5 +65,8 @@ sniffio==1.3.0
typing-extensions==4.12.2
# via agility
# via anyio
+ # via multidict
# via pydantic
# via pydantic-core
+yarl==1.20.0
+ # via aiohttp
diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh
new file mode 100755
index 0000000..e2b5a43
--- /dev/null
+++ b/scripts/utils/upload-artifact.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+FILENAME=$(basename dist/*.whl)
+
+RESPONSE=$(curl -X POST "$URL?filename=$FILENAME" \
+ -H "Authorization: Bearer $AUTH" \
+ -H "Content-Type: application/json")
+
+SIGNED_URL=$(echo "$RESPONSE" | jq -r '.url')
+
+if [[ -z "$SIGNED_URL" || "$SIGNED_URL" == "null" ]]; then
+ echo -e "\033[31mFailed to get signed URL.\033[0m"
+ exit 1
+fi
+
+UPLOAD_RESPONSE=$(curl -v -X PUT \
+ -H "Content-Type: binary/octet-stream" \
+ --data-binary "@dist/$FILENAME" "$SIGNED_URL" 2>&1)
+
+if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then
+ echo -e "\033[32mUploaded build to Stainless storage.\033[0m"
+ echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/agility-python/$SHA/$FILENAME'\033[0m"
+else
+ echo -e "\033[31mFailed to upload artifact.\033[0m"
+ exit 1
+fi
diff --git a/src/agility/__init__.py b/src/agility/__init__.py
index ec35654..9a1b783 100644
--- a/src/agility/__init__.py
+++ b/src/agility/__init__.py
@@ -1,5 +1,7 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+import typing as _t
+
from . import types
from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes
from ._utils import file_from_path
@@ -35,7 +37,7 @@
UnprocessableEntityError,
APIResponseValidationError,
)
-from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient
+from ._base_client import DefaultHttpxClient, DefaultAioHttpClient, DefaultAsyncHttpxClient
from ._utils._logs import setup_logging as _setup_logging
__all__ = [
@@ -78,8 +80,12 @@
"DEFAULT_CONNECTION_LIMITS",
"DefaultHttpxClient",
"DefaultAsyncHttpxClient",
+ "DefaultAioHttpClient",
]
+if not _t.TYPE_CHECKING:
+ from ._utils._resources_proxy import resources as resources
+
_setup_logging()
# Update the __module__ attribute for exported symbols so that
diff --git a/src/agility/_base_client.py b/src/agility/_base_client.py
index 2de45b2..9e078f1 100644
--- a/src/agility/_base_client.py
+++ b/src/agility/_base_client.py
@@ -98,7 +98,11 @@
_AsyncStreamT = TypeVar("_AsyncStreamT", bound=AsyncStream[Any])
if TYPE_CHECKING:
- from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT
+ from httpx._config import (
+ DEFAULT_TIMEOUT_CONFIG, # pyright: ignore[reportPrivateImportUsage]
+ )
+
+ HTTPX_DEFAULT_TIMEOUT = DEFAULT_TIMEOUT_CONFIG
else:
try:
from httpx._config import DEFAULT_TIMEOUT_CONFIG as HTTPX_DEFAULT_TIMEOUT
@@ -115,6 +119,7 @@ class PageInfo:
url: URL | NotGiven
params: Query | NotGiven
+ json: Body | NotGiven
@overload
def __init__(
@@ -130,19 +135,30 @@ def __init__(
params: Query,
) -> None: ...
+ @overload
+ def __init__(
+ self,
+ *,
+ json: Body,
+ ) -> None: ...
+
def __init__(
self,
*,
url: URL | NotGiven = NOT_GIVEN,
+ json: Body | NotGiven = NOT_GIVEN,
params: Query | NotGiven = NOT_GIVEN,
) -> None:
self.url = url
+ self.json = json
self.params = params
@override
def __repr__(self) -> str:
if self.url:
return f"{self.__class__.__name__}(url={self.url})"
+ if self.json:
+ return f"{self.__class__.__name__}(json={self.json})"
return f"{self.__class__.__name__}(params={self.params})"
@@ -191,6 +207,19 @@ def _info_to_options(self, info: PageInfo) -> FinalRequestOptions:
options.url = str(url)
return options
+ if not isinstance(info.json, NotGiven):
+ if not is_mapping(info.json):
+ raise TypeError("Pagination is only supported with mappings")
+
+ if not options.json_data:
+ options.json_data = {**info.json}
+ else:
+ if not is_mapping(options.json_data):
+ raise TypeError("Pagination is only supported with mappings")
+
+ options.json_data = {**options.json_data, **info.json}
+ return options
+
raise ValueError("Unexpected PageInfo state")
@@ -408,8 +437,7 @@ def _build_headers(self, options: FinalRequestOptions, *, retries_taken: int = 0
headers = httpx.Headers(headers_dict)
idempotency_header = self._idempotency_header
- if idempotency_header and options.method.lower() != "get" and idempotency_header not in headers:
- options.idempotency_key = options.idempotency_key or self._idempotency_key()
+ if idempotency_header and options.idempotency_key and idempotency_header not in headers:
headers[idempotency_header] = options.idempotency_key
# Don't set these headers if they were already set or removed by the caller. We check
@@ -501,6 +529,18 @@ def _build_request(
# work around https://github.com/encode/httpx/discussions/2880
kwargs["extensions"] = {"sni_hostname": prepared_url.host.replace("_", "-")}
+ is_body_allowed = options.method.lower() != "get"
+
+ if is_body_allowed:
+ if isinstance(json_data, bytes):
+ kwargs["content"] = json_data
+ else:
+ kwargs["json"] = json_data if is_given(json_data) else None
+ kwargs["files"] = files
+ else:
+ headers.pop("Content-Type", None)
+ kwargs.pop("data", None)
+
# TODO: report this error to httpx
return self._client.build_request( # pyright: ignore[reportUnknownMemberType]
headers=headers,
@@ -512,8 +552,6 @@ def _build_request(
# so that passing a `TypedDict` doesn't cause an error.
# https://github.com/microsoft/pyright/issues/3526#event-6715453066
params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None,
- json=json_data if is_given(json_data) else None,
- files=files,
**kwargs,
)
@@ -874,7 +912,6 @@ def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
- remaining_retries: Optional[int] = None,
*,
stream: Literal[True],
stream_cls: Type[_StreamT],
@@ -885,7 +922,6 @@ def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
- remaining_retries: Optional[int] = None,
*,
stream: Literal[False] = False,
) -> ResponseT: ...
@@ -895,7 +931,6 @@ def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
- remaining_retries: Optional[int] = None,
*,
stream: bool = False,
stream_cls: Type[_StreamT] | None = None,
@@ -905,125 +940,112 @@ def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
- remaining_retries: Optional[int] = None,
*,
stream: bool = False,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT:
- if remaining_retries is not None:
- retries_taken = options.get_max_retries(self.max_retries) - remaining_retries
- else:
- retries_taken = 0
-
- return self._request(
- cast_to=cast_to,
- options=options,
- stream=stream,
- stream_cls=stream_cls,
- retries_taken=retries_taken,
- )
+ cast_to = self._maybe_override_cast_to(cast_to, options)
- def _request(
- self,
- *,
- cast_to: Type[ResponseT],
- options: FinalRequestOptions,
- retries_taken: int,
- stream: bool,
- stream_cls: type[_StreamT] | None,
- ) -> ResponseT | _StreamT:
# create a copy of the options we were given so that if the
# options are mutated later & we then retry, the retries are
# given the original options
input_options = model_copy(options)
-
- cast_to = self._maybe_override_cast_to(cast_to, options)
- options = self._prepare_options(options)
-
- remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
- request = self._build_request(options, retries_taken=retries_taken)
- self._prepare_request(request)
-
- if options.idempotency_key:
+ if input_options.idempotency_key is None and input_options.method.lower() != "get":
# ensure the idempotency key is reused between requests
- input_options.idempotency_key = options.idempotency_key
+ input_options.idempotency_key = self._idempotency_key()
- kwargs: HttpxSendArgs = {}
- if self.custom_auth is not None:
- kwargs["auth"] = self.custom_auth
+ response: httpx.Response | None = None
+ max_retries = input_options.get_max_retries(self.max_retries)
- log.debug("Sending HTTP Request: %s %s", request.method, request.url)
+ retries_taken = 0
+ for retries_taken in range(max_retries + 1):
+ options = model_copy(input_options)
+ options = self._prepare_options(options)
- try:
- response = self._client.send(
- request,
- stream=stream or self._should_stream_response_body(request=request),
- **kwargs,
- )
- except httpx.TimeoutException as err:
- log.debug("Encountered httpx.TimeoutException", exc_info=True)
+ remaining_retries = max_retries - retries_taken
+ request = self._build_request(options, retries_taken=retries_taken)
+ self._prepare_request(request)
- if remaining_retries > 0:
- return self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- stream=stream,
- stream_cls=stream_cls,
- response_headers=None,
- )
+ kwargs: HttpxSendArgs = {}
+ if self.custom_auth is not None:
+ kwargs["auth"] = self.custom_auth
- log.debug("Raising timeout error")
- raise APITimeoutError(request=request) from err
- except Exception as err:
- log.debug("Encountered Exception", exc_info=True)
+ if options.follow_redirects is not None:
+ kwargs["follow_redirects"] = options.follow_redirects
- if remaining_retries > 0:
- return self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- stream=stream,
- stream_cls=stream_cls,
- response_headers=None,
- )
+ log.debug("Sending HTTP Request: %s %s", request.method, request.url)
- log.debug("Raising connection error")
- raise APIConnectionError(request=request) from err
+ response = None
+ try:
+ response = self._client.send(
+ request,
+ stream=stream or self._should_stream_response_body(request=request),
+ **kwargs,
+ )
+ except httpx.TimeoutException as err:
+ log.debug("Encountered httpx.TimeoutException", exc_info=True)
+
+ if remaining_retries > 0:
+ self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=None,
+ )
+ continue
+
+ log.debug("Raising timeout error")
+ raise APITimeoutError(request=request) from err
+ except Exception as err:
+ log.debug("Encountered Exception", exc_info=True)
+
+ if remaining_retries > 0:
+ self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=None,
+ )
+ continue
+
+ log.debug("Raising connection error")
+ raise APIConnectionError(request=request) from err
+
+ log.debug(
+ 'HTTP Response: %s %s "%i %s" %s',
+ request.method,
+ request.url,
+ response.status_code,
+ response.reason_phrase,
+ response.headers,
+ )
- log.debug(
- 'HTTP Response: %s %s "%i %s" %s',
- request.method,
- request.url,
- response.status_code,
- response.reason_phrase,
- response.headers,
- )
+ try:
+ response.raise_for_status()
+ except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
+ log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
+
+ if remaining_retries > 0 and self._should_retry(err.response):
+ err.response.close()
+ self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=response,
+ )
+ continue
- try:
- response.raise_for_status()
- except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
- log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
-
- if remaining_retries > 0 and self._should_retry(err.response):
- err.response.close()
- return self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- response_headers=err.response.headers,
- stream=stream,
- stream_cls=stream_cls,
- )
+ # If the response is streamed then we need to explicitly read the response
+ # to completion before attempting to access the response text.
+ if not err.response.is_closed:
+ err.response.read()
- # If the response is streamed then we need to explicitly read the response
- # to completion before attempting to access the response text.
- if not err.response.is_closed:
- err.response.read()
+ log.debug("Re-raising status error")
+ raise self._make_status_error_from_response(err.response) from None
- log.debug("Re-raising status error")
- raise self._make_status_error_from_response(err.response) from None
+ break
+ assert response is not None, "could not resolve response (should never happen)"
return self._process_response(
cast_to=cast_to,
options=options,
@@ -1033,37 +1055,20 @@ def _request(
retries_taken=retries_taken,
)
- def _retry_request(
- self,
- options: FinalRequestOptions,
- cast_to: Type[ResponseT],
- *,
- retries_taken: int,
- response_headers: httpx.Headers | None,
- stream: bool,
- stream_cls: type[_StreamT] | None,
- ) -> ResponseT | _StreamT:
- remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
+ def _sleep_for_retry(
+ self, *, retries_taken: int, max_retries: int, options: FinalRequestOptions, response: httpx.Response | None
+ ) -> None:
+ remaining_retries = max_retries - retries_taken
if remaining_retries == 1:
log.debug("1 retry left")
else:
log.debug("%i retries left", remaining_retries)
- timeout = self._calculate_retry_timeout(remaining_retries, options, response_headers)
+ timeout = self._calculate_retry_timeout(remaining_retries, options, response.headers if response else None)
log.info("Retrying request to %s in %f seconds", options.url, timeout)
- # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
- # different thread if necessary.
time.sleep(timeout)
- return self._request(
- options=options,
- cast_to=cast_to,
- retries_taken=retries_taken + 1,
- stream=stream,
- stream_cls=stream_cls,
- )
-
def _process_response(
self,
*,
@@ -1076,7 +1081,14 @@ def _process_response(
) -> ResponseT:
origin = get_origin(cast_to) or cast_to
- if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse):
+ if (
+ inspect.isclass(origin)
+ and issubclass(origin, BaseAPIResponse)
+ # we only want to actually return the custom BaseAPIResponse class if we're
+ # returning the raw response, or if we're not streaming SSE, as if we're streaming
+ # SSE then `cast_to` doesn't actively reflect the type we need to parse into
+ and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER)))
+ ):
if not issubclass(origin, APIResponse):
raise TypeError(f"API Response types must subclass {APIResponse}; Received {origin}")
@@ -1287,6 +1299,24 @@ def __init__(self, **kwargs: Any) -> None:
super().__init__(**kwargs)
+try:
+ import httpx_aiohttp
+except ImportError:
+
+ class _DefaultAioHttpClient(httpx.AsyncClient):
+ def __init__(self, **_kwargs: Any) -> None:
+ raise RuntimeError("To use the aiohttp client you must have installed the package with the `aiohttp` extra")
+else:
+
+ class _DefaultAioHttpClient(httpx_aiohttp.HttpxAiohttpClient): # type: ignore
+ def __init__(self, **kwargs: Any) -> None:
+ kwargs.setdefault("timeout", DEFAULT_TIMEOUT)
+ kwargs.setdefault("limits", DEFAULT_CONNECTION_LIMITS)
+ kwargs.setdefault("follow_redirects", True)
+
+ super().__init__(**kwargs)
+
+
if TYPE_CHECKING:
DefaultAsyncHttpxClient = httpx.AsyncClient
"""An alias to `httpx.AsyncClient` that provides the same defaults that this SDK
@@ -1295,8 +1325,12 @@ def __init__(self, **kwargs: Any) -> None:
This is useful because overriding the `http_client` with your own instance of
`httpx.AsyncClient` will result in httpx's defaults being used, not ours.
"""
+
+ DefaultAioHttpClient = httpx.AsyncClient
+ """An alias to `httpx.AsyncClient` that changes the default HTTP transport to `aiohttp`."""
else:
DefaultAsyncHttpxClient = _DefaultAsyncHttpxClient
+ DefaultAioHttpClient = _DefaultAioHttpClient
class AsyncHttpxClientWrapper(DefaultAsyncHttpxClient):
@@ -1407,7 +1441,6 @@ async def request(
options: FinalRequestOptions,
*,
stream: Literal[False] = False,
- remaining_retries: Optional[int] = None,
) -> ResponseT: ...
@overload
@@ -1418,7 +1451,6 @@ async def request(
*,
stream: Literal[True],
stream_cls: type[_AsyncStreamT],
- remaining_retries: Optional[int] = None,
) -> _AsyncStreamT: ...
@overload
@@ -1429,7 +1461,6 @@ async def request(
*,
stream: bool,
stream_cls: type[_AsyncStreamT] | None = None,
- remaining_retries: Optional[int] = None,
) -> ResponseT | _AsyncStreamT: ...
async def request(
@@ -1439,120 +1470,114 @@ async def request(
*,
stream: bool = False,
stream_cls: type[_AsyncStreamT] | None = None,
- remaining_retries: Optional[int] = None,
- ) -> ResponseT | _AsyncStreamT:
- if remaining_retries is not None:
- retries_taken = options.get_max_retries(self.max_retries) - remaining_retries
- else:
- retries_taken = 0
-
- return await self._request(
- cast_to=cast_to,
- options=options,
- stream=stream,
- stream_cls=stream_cls,
- retries_taken=retries_taken,
- )
-
- async def _request(
- self,
- cast_to: Type[ResponseT],
- options: FinalRequestOptions,
- *,
- stream: bool,
- stream_cls: type[_AsyncStreamT] | None,
- retries_taken: int,
) -> ResponseT | _AsyncStreamT:
if self._platform is None:
# `get_platform` can make blocking IO calls so we
# execute it earlier while we are in an async context
self._platform = await asyncify(get_platform)()
+ cast_to = self._maybe_override_cast_to(cast_to, options)
+
# create a copy of the options we were given so that if the
# options are mutated later & we then retry, the retries are
# given the original options
input_options = model_copy(options)
+ if input_options.idempotency_key is None and input_options.method.lower() != "get":
+ # ensure the idempotency key is reused between requests
+ input_options.idempotency_key = self._idempotency_key()
- cast_to = self._maybe_override_cast_to(cast_to, options)
- options = await self._prepare_options(options)
+ response: httpx.Response | None = None
+ max_retries = input_options.get_max_retries(self.max_retries)
- remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
- request = self._build_request(options, retries_taken=retries_taken)
- await self._prepare_request(request)
+ retries_taken = 0
+ for retries_taken in range(max_retries + 1):
+ options = model_copy(input_options)
+ options = await self._prepare_options(options)
- if options.idempotency_key:
- # ensure the idempotency key is reused between requests
- input_options.idempotency_key = options.idempotency_key
+ remaining_retries = max_retries - retries_taken
+ request = self._build_request(options, retries_taken=retries_taken)
+ await self._prepare_request(request)
- kwargs: HttpxSendArgs = {}
- if self.custom_auth is not None:
- kwargs["auth"] = self.custom_auth
+ kwargs: HttpxSendArgs = {}
+ if self.custom_auth is not None:
+ kwargs["auth"] = self.custom_auth
- try:
- response = await self._client.send(
- request,
- stream=stream or self._should_stream_response_body(request=request),
- **kwargs,
- )
- except httpx.TimeoutException as err:
- log.debug("Encountered httpx.TimeoutException", exc_info=True)
+ if options.follow_redirects is not None:
+ kwargs["follow_redirects"] = options.follow_redirects
- if remaining_retries > 0:
- return await self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- stream=stream,
- stream_cls=stream_cls,
- response_headers=None,
- )
+ log.debug("Sending HTTP Request: %s %s", request.method, request.url)
- log.debug("Raising timeout error")
- raise APITimeoutError(request=request) from err
- except Exception as err:
- log.debug("Encountered Exception", exc_info=True)
-
- if remaining_retries > 0:
- return await self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- stream=stream,
- stream_cls=stream_cls,
- response_headers=None,
+ response = None
+ try:
+ response = await self._client.send(
+ request,
+ stream=stream or self._should_stream_response_body(request=request),
+ **kwargs,
)
+ except httpx.TimeoutException as err:
+ log.debug("Encountered httpx.TimeoutException", exc_info=True)
+
+ if remaining_retries > 0:
+ await self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=None,
+ )
+ continue
+
+ log.debug("Raising timeout error")
+ raise APITimeoutError(request=request) from err
+ except Exception as err:
+ log.debug("Encountered Exception", exc_info=True)
+
+ if remaining_retries > 0:
+ await self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=None,
+ )
+ continue
+
+ log.debug("Raising connection error")
+ raise APIConnectionError(request=request) from err
+
+ log.debug(
+ 'HTTP Response: %s %s "%i %s" %s',
+ request.method,
+ request.url,
+ response.status_code,
+ response.reason_phrase,
+ response.headers,
+ )
- log.debug("Raising connection error")
- raise APIConnectionError(request=request) from err
+ try:
+ response.raise_for_status()
+ except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
+ log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
+
+ if remaining_retries > 0 and self._should_retry(err.response):
+ await err.response.aclose()
+ await self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=response,
+ )
+ continue
- log.debug(
- 'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase
- )
+ # If the response is streamed then we need to explicitly read the response
+ # to completion before attempting to access the response text.
+ if not err.response.is_closed:
+ await err.response.aread()
- try:
- response.raise_for_status()
- except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
- log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
-
- if remaining_retries > 0 and self._should_retry(err.response):
- await err.response.aclose()
- return await self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- response_headers=err.response.headers,
- stream=stream,
- stream_cls=stream_cls,
- )
+ log.debug("Re-raising status error")
+ raise self._make_status_error_from_response(err.response) from None
- # If the response is streamed then we need to explicitly read the response
- # to completion before attempting to access the response text.
- if not err.response.is_closed:
- await err.response.aread()
-
- log.debug("Re-raising status error")
- raise self._make_status_error_from_response(err.response) from None
+ break
+ assert response is not None, "could not resolve response (should never happen)"
return await self._process_response(
cast_to=cast_to,
options=options,
@@ -1562,35 +1587,20 @@ async def _request(
retries_taken=retries_taken,
)
- async def _retry_request(
- self,
- options: FinalRequestOptions,
- cast_to: Type[ResponseT],
- *,
- retries_taken: int,
- response_headers: httpx.Headers | None,
- stream: bool,
- stream_cls: type[_AsyncStreamT] | None,
- ) -> ResponseT | _AsyncStreamT:
- remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
+ async def _sleep_for_retry(
+ self, *, retries_taken: int, max_retries: int, options: FinalRequestOptions, response: httpx.Response | None
+ ) -> None:
+ remaining_retries = max_retries - retries_taken
if remaining_retries == 1:
log.debug("1 retry left")
else:
log.debug("%i retries left", remaining_retries)
- timeout = self._calculate_retry_timeout(remaining_retries, options, response_headers)
+ timeout = self._calculate_retry_timeout(remaining_retries, options, response.headers if response else None)
log.info("Retrying request to %s in %f seconds", options.url, timeout)
await anyio.sleep(timeout)
- return await self._request(
- options=options,
- cast_to=cast_to,
- retries_taken=retries_taken + 1,
- stream=stream,
- stream_cls=stream_cls,
- )
-
async def _process_response(
self,
*,
@@ -1603,7 +1613,14 @@ async def _process_response(
) -> ResponseT:
origin = get_origin(cast_to) or cast_to
- if inspect.isclass(origin) and issubclass(origin, BaseAPIResponse):
+ if (
+ inspect.isclass(origin)
+ and issubclass(origin, BaseAPIResponse)
+ # we only want to actually return the custom BaseAPIResponse class if we're
+ # returning the raw response, or if we're not streaming SSE, because when streaming
+ # SSE `cast_to` doesn't accurately reflect the type we need to parse into
+ and (not stream or bool(response.request.headers.get(RAW_RESPONSE_HEADER)))
+ ):
if not issubclass(origin, AsyncAPIResponse):
raise TypeError(f"API Response types must subclass {AsyncAPIResponse}; Received {origin}")
diff --git a/src/agility/_client.py b/src/agility/_client.py
index 818130a..a539dcd 100644
--- a/src/agility/_client.py
+++ b/src/agility/_client.py
@@ -19,10 +19,7 @@
ProxiesTypes,
RequestOptions,
)
-from ._utils import (
- is_given,
- get_async_library,
-)
+from ._utils import is_given, get_async_library
from ._version import __version__
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
from ._exceptions import AgilityError, APIStatusError
diff --git a/src/agility/_files.py b/src/agility/_files.py
index 715cc20..cc14c14 100644
--- a/src/agility/_files.py
+++ b/src/agility/_files.py
@@ -69,12 +69,12 @@ def _transform_file(file: FileTypes) -> HttpxFileTypes:
return file
if is_tuple_t(file):
- return (file[0], _read_file_content(file[1]), *file[2:])
+ return (file[0], read_file_content(file[1]), *file[2:])
raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple")
-def _read_file_content(file: FileContent) -> HttpxFileContent:
+def read_file_content(file: FileContent) -> HttpxFileContent:
if isinstance(file, os.PathLike):
return pathlib.Path(file).read_bytes()
return file
@@ -111,12 +111,12 @@ async def _async_transform_file(file: FileTypes) -> HttpxFileTypes:
return file
if is_tuple_t(file):
- return (file[0], await _async_read_file_content(file[1]), *file[2:])
+ return (file[0], await async_read_file_content(file[1]), *file[2:])
raise TypeError(f"Expected file types input to be a FileContent type or to be a tuple")
-async def _async_read_file_content(file: FileContent) -> HttpxFileContent:
+async def async_read_file_content(file: FileContent) -> HttpxFileContent:
if isinstance(file, os.PathLike):
return await anyio.Path(file).read_bytes()
diff --git a/src/agility/_models.py b/src/agility/_models.py
index 3493571..b8387ce 100644
--- a/src/agility/_models.py
+++ b/src/agility/_models.py
@@ -2,9 +2,10 @@
import os
import inspect
-from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, cast
+from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast
from datetime import date, datetime
from typing_extensions import (
+ List,
Unpack,
Literal,
ClassVar,
@@ -19,7 +20,6 @@
)
import pydantic
-import pydantic.generics
from pydantic.fields import FieldInfo
from ._types import (
@@ -208,14 +208,18 @@ def construct( # pyright: ignore[reportIncompatibleMethodOverride]
else:
fields_values[name] = field_get_default(field)
+ extra_field_type = _get_extra_fields_type(__cls)
+
_extra = {}
for key, value in values.items():
if key not in model_fields:
+ parsed = construct_type(value=value, type_=extra_field_type) if extra_field_type is not None else value
+
if PYDANTIC_V2:
- _extra[key] = value
+ _extra[key] = parsed
else:
_fields_set.add(key)
- fields_values[key] = value
+ fields_values[key] = parsed
object.__setattr__(m, "__dict__", fields_values)
@@ -367,7 +371,24 @@ def _construct_field(value: object, field: FieldInfo, key: str) -> object:
if type_ is None:
raise RuntimeError(f"Unexpected field type is None for {key}")
- return construct_type(value=value, type_=type_)
+ return construct_type(value=value, type_=type_, metadata=getattr(field, "metadata", None))
+
+
+def _get_extra_fields_type(cls: type[pydantic.BaseModel]) -> type | None:
+ if not PYDANTIC_V2:
+ # TODO: extract the extra-fields type from Pydantic v1 models as well
+ return None
+
+ schema = cls.__pydantic_core_schema__
+ if schema["type"] == "model":
+ fields = schema["schema"]
+ if fields["type"] == "model-fields":
+ extras = fields.get("extras_schema")
+ if extras and "cls" in extras:
+ # mypy can't narrow the type
+ return extras["cls"] # type: ignore[no-any-return]
+
+ return None
def is_basemodel(type_: type) -> bool:
@@ -421,7 +442,7 @@ def construct_type_unchecked(*, value: object, type_: type[_T]) -> _T:
return cast(_T, construct_type(value=value, type_=type_))
-def construct_type(*, value: object, type_: object) -> object:
+def construct_type(*, value: object, type_: object, metadata: Optional[List[Any]] = None) -> object:
"""Loose coercion to the expected type with construction of nested values.
If the given value does not match the expected type then it is returned as-is.
@@ -439,8 +460,10 @@ def construct_type(*, value: object, type_: object) -> object:
type_ = type_.__value__ # type: ignore[unreachable]
# unwrap `Annotated[T, ...]` -> `T`
- if is_annotated_type(type_):
- meta: tuple[Any, ...] = get_args(type_)[1:]
+ if metadata is not None and len(metadata) > 0:
+ meta: tuple[Any, ...] = tuple(metadata)
+ elif is_annotated_type(type_):
+ meta = get_args(type_)[1:]
type_ = extract_type_arg(type_, 0)
else:
meta = tuple()
@@ -627,8 +650,8 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any,
# Note: if one variant defines an alias then they all should
discriminator_alias = field_info.alias
- if field_info.annotation and is_literal_type(field_info.annotation):
- for entry in get_args(field_info.annotation):
+ if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation):
+ for entry in get_args(annotation):
if isinstance(entry, str):
mapping[entry] = variant
@@ -738,6 +761,7 @@ class FinalRequestOptionsInput(TypedDict, total=False):
idempotency_key: str
json_data: Body
extra_json: AnyMapping
+ follow_redirects: bool
@final
@@ -751,6 +775,7 @@ class FinalRequestOptions(pydantic.BaseModel):
files: Union[HttpxRequestFiles, None] = None
idempotency_key: Union[str, None] = None
post_parser: Union[Callable[[Any], Any], NotGiven] = NotGiven()
+ follow_redirects: Union[bool, None] = None
# It should be noted that we cannot use `json` here as that would override
# a BaseModel method in an incompatible fashion.
diff --git a/src/agility/_response.py b/src/agility/_response.py
index 9c02d4e..af2e7d1 100644
--- a/src/agility/_response.py
+++ b/src/agility/_response.py
@@ -233,7 +233,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
# split is required to handle cases where additional information is included
# in the response, e.g. application/json; charset=utf-8
content_type, *_ = response.headers.get("content-type", "*").split(";")
- if content_type != "application/json":
+ if not content_type.endswith("json"):
if is_basemodel(cast_to):
try:
data = response.json()
diff --git a/src/agility/_types.py b/src/agility/_types.py
index caac259..d6c3a55 100644
--- a/src/agility/_types.py
+++ b/src/agility/_types.py
@@ -100,6 +100,7 @@ class RequestOptions(TypedDict, total=False):
params: Query
extra_json: AnyMapping
idempotency_key: str
+ follow_redirects: bool
# Sentinel class used until PEP 0661 is accepted
@@ -215,3 +216,4 @@ class _GenericAlias(Protocol):
class HttpxSendArgs(TypedDict, total=False):
auth: httpx.Auth
+ follow_redirects: bool
diff --git a/src/agility/_utils/_proxy.py b/src/agility/_utils/_proxy.py
index ffd883e..0f239a3 100644
--- a/src/agility/_utils/_proxy.py
+++ b/src/agility/_utils/_proxy.py
@@ -46,7 +46,10 @@ def __dir__(self) -> Iterable[str]:
@property # type: ignore
@override
def __class__(self) -> type: # pyright: ignore
- proxied = self.__get_proxied__()
+ try:
+ proxied = self.__get_proxied__()
+ except Exception:
+ return type(self)
if issubclass(type(proxied), LazyProxy):
return type(proxied)
return proxied.__class__
diff --git a/src/agility/_utils/_resources_proxy.py b/src/agility/_utils/_resources_proxy.py
new file mode 100644
index 0000000..0c2f171
--- /dev/null
+++ b/src/agility/_utils/_resources_proxy.py
@@ -0,0 +1,24 @@
+from __future__ import annotations
+
+from typing import Any
+from typing_extensions import override
+
+from ._proxy import LazyProxy
+
+
+class ResourcesProxy(LazyProxy[Any]):
+ """A proxy for the `agility.resources` module.
+
+ This is used so that we can lazily import `agility.resources` only when
+ needed *and* so that users can just import `agility` and reference `agility.resources`.
+ """
+
+ @override
+ def __load__(self) -> Any:
+ import importlib
+
+ mod = importlib.import_module("agility.resources")
+ return mod
+
+
+resources = ResourcesProxy().__as_proxied__()
diff --git a/src/agility/_utils/_typing.py b/src/agility/_utils/_typing.py
index 1958820..1bac954 100644
--- a/src/agility/_utils/_typing.py
+++ b/src/agility/_utils/_typing.py
@@ -110,7 +110,7 @@ class MyResponse(Foo[_T]):
```
"""
cls = cast(object, get_origin(typ) or typ)
- if cls in generic_bases:
+ if cls in generic_bases: # pyright: ignore[reportUnnecessaryContains]
# we're given the class directly
return extract_type_arg(typ, index)
diff --git a/src/agility/_utils/_utils.py b/src/agility/_utils/_utils.py
index e5811bb..ea3cf3f 100644
--- a/src/agility/_utils/_utils.py
+++ b/src/agility/_utils/_utils.py
@@ -72,8 +72,16 @@ def _extract_items(
from .._files import assert_is_file_content
# We have exhausted the path, return the entry we found.
- assert_is_file_content(obj, key=flattened_key)
assert flattened_key is not None
+
+ if is_list(obj):
+ files: list[tuple[str, FileTypes]] = []
+ for entry in obj:
+ assert_is_file_content(entry, key=flattened_key + "[]" if flattened_key else "")
+ files.append((flattened_key + "[]", cast(FileTypes, entry)))
+ return files
+
+ assert_is_file_content(obj, key=flattened_key)
return [(flattened_key, cast(FileTypes, obj))]
index += 1
diff --git a/src/agility/resources/assistants/access_keys.py b/src/agility/resources/assistants/access_keys.py
index 2b154fe..d6d9733 100644
--- a/src/agility/resources/assistants/access_keys.py
+++ b/src/agility/resources/assistants/access_keys.py
@@ -8,10 +8,7 @@
import httpx
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import (
- maybe_transform,
- async_maybe_transform,
-)
+from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
diff --git a/src/agility/resources/assistants/assistants.py b/src/agility/resources/assistants/assistants.py
index 995d51d..bb7a442 100644
--- a/src/agility/resources/assistants/assistants.py
+++ b/src/agility/resources/assistants/assistants.py
@@ -9,10 +9,7 @@
from ...types import assistant_list_params, assistant_create_params, assistant_update_params
from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
-from ..._utils import (
- maybe_transform,
- async_maybe_transform,
-)
+from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -34,6 +31,7 @@
from ...types.assistant import Assistant
from ...types.assistant_with_config import AssistantWithConfig
from ...types.assistant_list_response import AssistantListResponse
+from ...types.assistant_retrieve_run_metadata_response import AssistantRetrieveRunMetadataResponse
__all__ = ["AssistantsResource", "AsyncAssistantsResource"]
@@ -69,15 +67,14 @@ def create(
knowledge_base_id: Optional[str],
name: str,
codex_access_key: Optional[str] | NotGiven = NOT_GIVEN,
+ codex_as_cache: bool | NotGiven = NOT_GIVEN,
context_limit: Optional[int] | NotGiven = NOT_GIVEN,
+ hard_coded_queries: Optional[Iterable[assistant_create_params.HardCodedQuery]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
logo_s3_key: Optional[str] | NotGiven = NOT_GIVEN,
logo_text: Optional[str] | NotGiven = NOT_GIVEN,
model: Optional[Literal["gpt-4o"]] | NotGiven = NOT_GIVEN,
- response_validation_config: Optional[Iterable[assistant_create_params.ResponseValidationConfig]]
- | NotGiven = NOT_GIVEN,
suggested_questions: List[str] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[assistant_create_params.Tool]] | NotGiven = NOT_GIVEN,
url_slug: Optional[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -120,14 +117,14 @@ def create(
"knowledge_base_id": knowledge_base_id,
"name": name,
"codex_access_key": codex_access_key,
+ "codex_as_cache": codex_as_cache,
"context_limit": context_limit,
+ "hard_coded_queries": hard_coded_queries,
"instructions": instructions,
"logo_s3_key": logo_s3_key,
"logo_text": logo_text,
"model": model,
- "response_validation_config": response_validation_config,
"suggested_questions": suggested_questions,
- "tools": tools,
"url_slug": url_slug,
},
assistant_create_params.AssistantCreateParams,
@@ -180,15 +177,14 @@ def update(
knowledge_base_id: Optional[str],
name: str,
codex_access_key: Optional[str] | NotGiven = NOT_GIVEN,
+ codex_as_cache: bool | NotGiven = NOT_GIVEN,
context_limit: Optional[int] | NotGiven = NOT_GIVEN,
+ hard_coded_queries: Optional[Iterable[assistant_update_params.HardCodedQuery]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
logo_s3_key: Optional[str] | NotGiven = NOT_GIVEN,
logo_text: Optional[str] | NotGiven = NOT_GIVEN,
model: Optional[Literal["gpt-4o"]] | NotGiven = NOT_GIVEN,
- response_validation_config: Optional[Iterable[assistant_update_params.ResponseValidationConfig]]
- | NotGiven = NOT_GIVEN,
suggested_questions: List[str] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[assistant_update_params.Tool]] | NotGiven = NOT_GIVEN,
url_slug: Optional[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -234,14 +230,14 @@ def update(
"knowledge_base_id": knowledge_base_id,
"name": name,
"codex_access_key": codex_access_key,
+ "codex_as_cache": codex_as_cache,
"context_limit": context_limit,
+ "hard_coded_queries": hard_coded_queries,
"instructions": instructions,
"logo_s3_key": logo_s3_key,
"logo_text": logo_text,
"model": model,
- "response_validation_config": response_validation_config,
"suggested_questions": suggested_questions,
- "tools": tools,
"url_slug": url_slug,
},
assistant_update_params.AssistantUpdateParams,
@@ -329,6 +325,42 @@ def delete(
cast_to=NoneType,
)
+ def retrieve_run_metadata(
+ self,
+ run_id: str,
+ *,
+ assistant_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AssistantRetrieveRunMetadataResponse:
+ """
+ Get historical run metadata for an assistant.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not assistant_id:
+ raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
+ if not run_id:
+ raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+ return self._get(
+ f"/api/assistants/{assistant_id}/historical_run_metadata/{run_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AssistantRetrieveRunMetadataResponse,
+ )
+
class AsyncAssistantsResource(AsyncAPIResource):
@cached_property
@@ -361,15 +393,14 @@ async def create(
knowledge_base_id: Optional[str],
name: str,
codex_access_key: Optional[str] | NotGiven = NOT_GIVEN,
+ codex_as_cache: bool | NotGiven = NOT_GIVEN,
context_limit: Optional[int] | NotGiven = NOT_GIVEN,
+ hard_coded_queries: Optional[Iterable[assistant_create_params.HardCodedQuery]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
logo_s3_key: Optional[str] | NotGiven = NOT_GIVEN,
logo_text: Optional[str] | NotGiven = NOT_GIVEN,
model: Optional[Literal["gpt-4o"]] | NotGiven = NOT_GIVEN,
- response_validation_config: Optional[Iterable[assistant_create_params.ResponseValidationConfig]]
- | NotGiven = NOT_GIVEN,
suggested_questions: List[str] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[assistant_create_params.Tool]] | NotGiven = NOT_GIVEN,
url_slug: Optional[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -412,14 +443,14 @@ async def create(
"knowledge_base_id": knowledge_base_id,
"name": name,
"codex_access_key": codex_access_key,
+ "codex_as_cache": codex_as_cache,
"context_limit": context_limit,
+ "hard_coded_queries": hard_coded_queries,
"instructions": instructions,
"logo_s3_key": logo_s3_key,
"logo_text": logo_text,
"model": model,
- "response_validation_config": response_validation_config,
"suggested_questions": suggested_questions,
- "tools": tools,
"url_slug": url_slug,
},
assistant_create_params.AssistantCreateParams,
@@ -472,15 +503,14 @@ async def update(
knowledge_base_id: Optional[str],
name: str,
codex_access_key: Optional[str] | NotGiven = NOT_GIVEN,
+ codex_as_cache: bool | NotGiven = NOT_GIVEN,
context_limit: Optional[int] | NotGiven = NOT_GIVEN,
+ hard_coded_queries: Optional[Iterable[assistant_update_params.HardCodedQuery]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
logo_s3_key: Optional[str] | NotGiven = NOT_GIVEN,
logo_text: Optional[str] | NotGiven = NOT_GIVEN,
model: Optional[Literal["gpt-4o"]] | NotGiven = NOT_GIVEN,
- response_validation_config: Optional[Iterable[assistant_update_params.ResponseValidationConfig]]
- | NotGiven = NOT_GIVEN,
suggested_questions: List[str] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[assistant_update_params.Tool]] | NotGiven = NOT_GIVEN,
url_slug: Optional[str] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
@@ -526,14 +556,14 @@ async def update(
"knowledge_base_id": knowledge_base_id,
"name": name,
"codex_access_key": codex_access_key,
+ "codex_as_cache": codex_as_cache,
"context_limit": context_limit,
+ "hard_coded_queries": hard_coded_queries,
"instructions": instructions,
"logo_s3_key": logo_s3_key,
"logo_text": logo_text,
"model": model,
- "response_validation_config": response_validation_config,
"suggested_questions": suggested_questions,
- "tools": tools,
"url_slug": url_slug,
},
assistant_update_params.AssistantUpdateParams,
@@ -621,6 +651,42 @@ async def delete(
cast_to=NoneType,
)
+ async def retrieve_run_metadata(
+ self,
+ run_id: str,
+ *,
+ assistant_id: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AssistantRetrieveRunMetadataResponse:
+ """
+ Get historical run metadata for an assistant.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not assistant_id:
+ raise ValueError(f"Expected a non-empty value for `assistant_id` but received {assistant_id!r}")
+ if not run_id:
+ raise ValueError(f"Expected a non-empty value for `run_id` but received {run_id!r}")
+ return await self._get(
+ f"/api/assistants/{assistant_id}/historical_run_metadata/{run_id}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AssistantRetrieveRunMetadataResponse,
+ )
+
class AssistantsResourceWithRawResponse:
def __init__(self, assistants: AssistantsResource) -> None:
@@ -641,6 +707,9 @@ def __init__(self, assistants: AssistantsResource) -> None:
self.delete = to_raw_response_wrapper(
assistants.delete,
)
+ self.retrieve_run_metadata = to_raw_response_wrapper(
+ assistants.retrieve_run_metadata,
+ )
@cached_property
def access_keys(self) -> AccessKeysResourceWithRawResponse:
@@ -666,6 +735,9 @@ def __init__(self, assistants: AsyncAssistantsResource) -> None:
self.delete = async_to_raw_response_wrapper(
assistants.delete,
)
+ self.retrieve_run_metadata = async_to_raw_response_wrapper(
+ assistants.retrieve_run_metadata,
+ )
@cached_property
def access_keys(self) -> AsyncAccessKeysResourceWithRawResponse:
@@ -691,6 +763,9 @@ def __init__(self, assistants: AssistantsResource) -> None:
self.delete = to_streamed_response_wrapper(
assistants.delete,
)
+ self.retrieve_run_metadata = to_streamed_response_wrapper(
+ assistants.retrieve_run_metadata,
+ )
@cached_property
def access_keys(self) -> AccessKeysResourceWithStreamingResponse:
@@ -716,6 +791,9 @@ def __init__(self, assistants: AsyncAssistantsResource) -> None:
self.delete = async_to_streamed_response_wrapper(
assistants.delete,
)
+ self.retrieve_run_metadata = async_to_streamed_response_wrapper(
+ assistants.retrieve_run_metadata,
+ )
@cached_property
def access_keys(self) -> AsyncAccessKeysResourceWithStreamingResponse:
diff --git a/src/agility/resources/integrations/integrations.py b/src/agility/resources/integrations/integrations.py
index 536398a..ae3f689 100644
--- a/src/agility/resources/integrations/integrations.py
+++ b/src/agility/resources/integrations/integrations.py
@@ -16,10 +16,7 @@
)
from ...types import integration_list_params, integration_create_params
from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
-from ..._utils import (
- maybe_transform,
- async_maybe_transform,
-)
+from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from .available import (
AvailableResource,
diff --git a/src/agility/resources/knowledge_bases/knowledge_bases.py b/src/agility/resources/knowledge_bases/knowledge_bases.py
index 3d6cf25..6844a48 100644
--- a/src/agility/resources/knowledge_bases/knowledge_bases.py
+++ b/src/agility/resources/knowledge_bases/knowledge_bases.py
@@ -4,16 +4,9 @@
import httpx
-from ...types import (
- knowledge_base_list_params,
- knowledge_base_create_params,
- knowledge_base_update_params,
-)
+from ...types import knowledge_base_list_params, knowledge_base_create_params, knowledge_base_update_params
from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
-from ..._utils import (
- maybe_transform,
- async_maybe_transform,
-)
+from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
diff --git a/src/agility/resources/knowledge_bases/sources/sources.py b/src/agility/resources/knowledge_bases/sources/sources.py
index 06bc2a6..efa0fc0 100644
--- a/src/agility/resources/knowledge_bases/sources/sources.py
+++ b/src/agility/resources/knowledge_bases/sources/sources.py
@@ -5,10 +5,7 @@
import httpx
from ...._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
-from ...._utils import (
- maybe_transform,
- async_maybe_transform,
-)
+from ...._utils import maybe_transform, async_maybe_transform
from .documents import (
DocumentsResource,
AsyncDocumentsResource,
diff --git a/src/agility/resources/threads/messages.py b/src/agility/resources/threads/messages.py
index c1e7fd9..ab6facc 100644
--- a/src/agility/resources/threads/messages.py
+++ b/src/agility/resources/threads/messages.py
@@ -8,10 +8,7 @@
import httpx
from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
-from ..._utils import (
- maybe_transform,
- async_maybe_transform,
-)
+from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
diff --git a/src/agility/resources/threads/runs.py b/src/agility/resources/threads/runs.py
index 47c80b3..6435812 100644
--- a/src/agility/resources/threads/runs.py
+++ b/src/agility/resources/threads/runs.py
@@ -8,10 +8,7 @@
import httpx
from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
-from ..._utils import (
- maybe_transform,
- async_maybe_transform,
-)
+from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -55,13 +52,12 @@ def create(
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Iterable[run_create_params.AdditionalMessage] | NotGiven = NOT_GIVEN,
codex_access_key: Optional[str] | NotGiven = NOT_GIVEN,
+ codex_as_cache: Optional[bool] | NotGiven = NOT_GIVEN,
context_limit: Optional[int] | NotGiven = NOT_GIVEN,
+ hard_coded_queries: Optional[Iterable[run_create_params.HardCodedQuery]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
knowledge_base_id: Optional[str] | NotGiven = NOT_GIVEN,
model: Optional[Literal["gpt-4o"]] | NotGiven = NOT_GIVEN,
- response_validation_config: Optional[Iterable[run_create_params.ResponseValidationConfig]]
- | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[run_create_params.Tool]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -93,12 +89,12 @@ def create(
"additional_instructions": additional_instructions,
"additional_messages": additional_messages,
"codex_access_key": codex_access_key,
+ "codex_as_cache": codex_as_cache,
"context_limit": context_limit,
+ "hard_coded_queries": hard_coded_queries,
"instructions": instructions,
"knowledge_base_id": knowledge_base_id,
"model": model,
- "response_validation_config": response_validation_config,
- "tools": tools,
},
run_create_params.RunCreateParams,
),
@@ -189,13 +185,12 @@ def stream(
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Iterable[run_stream_params.AdditionalMessage] | NotGiven = NOT_GIVEN,
codex_access_key: Optional[str] | NotGiven = NOT_GIVEN,
+ codex_as_cache: Optional[bool] | NotGiven = NOT_GIVEN,
context_limit: Optional[int] | NotGiven = NOT_GIVEN,
+ hard_coded_queries: Optional[Iterable[run_stream_params.HardCodedQuery]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
knowledge_base_id: Optional[str] | NotGiven = NOT_GIVEN,
model: Optional[Literal["gpt-4o"]] | NotGiven = NOT_GIVEN,
- response_validation_config: Optional[Iterable[run_stream_params.ResponseValidationConfig]]
- | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[run_stream_params.Tool]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -227,12 +222,12 @@ def stream(
"additional_instructions": additional_instructions,
"additional_messages": additional_messages,
"codex_access_key": codex_access_key,
+ "codex_as_cache": codex_as_cache,
"context_limit": context_limit,
+ "hard_coded_queries": hard_coded_queries,
"instructions": instructions,
"knowledge_base_id": knowledge_base_id,
"model": model,
- "response_validation_config": response_validation_config,
- "tools": tools,
},
run_stream_params.RunStreamParams,
),
@@ -271,13 +266,12 @@ async def create(
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Iterable[run_create_params.AdditionalMessage] | NotGiven = NOT_GIVEN,
codex_access_key: Optional[str] | NotGiven = NOT_GIVEN,
+ codex_as_cache: Optional[bool] | NotGiven = NOT_GIVEN,
context_limit: Optional[int] | NotGiven = NOT_GIVEN,
+ hard_coded_queries: Optional[Iterable[run_create_params.HardCodedQuery]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
knowledge_base_id: Optional[str] | NotGiven = NOT_GIVEN,
model: Optional[Literal["gpt-4o"]] | NotGiven = NOT_GIVEN,
- response_validation_config: Optional[Iterable[run_create_params.ResponseValidationConfig]]
- | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[run_create_params.Tool]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -309,12 +303,12 @@ async def create(
"additional_instructions": additional_instructions,
"additional_messages": additional_messages,
"codex_access_key": codex_access_key,
+ "codex_as_cache": codex_as_cache,
"context_limit": context_limit,
+ "hard_coded_queries": hard_coded_queries,
"instructions": instructions,
"knowledge_base_id": knowledge_base_id,
"model": model,
- "response_validation_config": response_validation_config,
- "tools": tools,
},
run_create_params.RunCreateParams,
),
@@ -405,13 +399,12 @@ async def stream(
additional_instructions: Optional[str] | NotGiven = NOT_GIVEN,
additional_messages: Iterable[run_stream_params.AdditionalMessage] | NotGiven = NOT_GIVEN,
codex_access_key: Optional[str] | NotGiven = NOT_GIVEN,
+ codex_as_cache: Optional[bool] | NotGiven = NOT_GIVEN,
context_limit: Optional[int] | NotGiven = NOT_GIVEN,
+ hard_coded_queries: Optional[Iterable[run_stream_params.HardCodedQuery]] | NotGiven = NOT_GIVEN,
instructions: Optional[str] | NotGiven = NOT_GIVEN,
knowledge_base_id: Optional[str] | NotGiven = NOT_GIVEN,
model: Optional[Literal["gpt-4o"]] | NotGiven = NOT_GIVEN,
- response_validation_config: Optional[Iterable[run_stream_params.ResponseValidationConfig]]
- | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[run_stream_params.Tool]] | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -443,12 +436,12 @@ async def stream(
"additional_instructions": additional_instructions,
"additional_messages": additional_messages,
"codex_access_key": codex_access_key,
+ "codex_as_cache": codex_as_cache,
"context_limit": context_limit,
+ "hard_coded_queries": hard_coded_queries,
"instructions": instructions,
"knowledge_base_id": knowledge_base_id,
"model": model,
- "response_validation_config": response_validation_config,
- "tools": tools,
},
run_stream_params.RunStreamParams,
),
diff --git a/src/agility/types/__init__.py b/src/agility/types/__init__.py
index a9380ef..be25673 100644
--- a/src/agility/types/__init__.py
+++ b/src/agility/types/__init__.py
@@ -24,3 +24,6 @@
from .knowledge_base_list_response import KnowledgeBaseListResponse as KnowledgeBaseListResponse
from .knowledge_base_update_params import KnowledgeBaseUpdateParams as KnowledgeBaseUpdateParams
from .integration_retrieve_response import IntegrationRetrieveResponse as IntegrationRetrieveResponse
+from .assistant_retrieve_run_metadata_response import (
+ AssistantRetrieveRunMetadataResponse as AssistantRetrieveRunMetadataResponse,
+)
diff --git a/src/agility/types/assistant_create_params.py b/src/agility/types/assistant_create_params.py
index 70e0eb5..80e782d 100644
--- a/src/agility/types/assistant_create_params.py
+++ b/src/agility/types/assistant_create_params.py
@@ -2,10 +2,10 @@
from __future__ import annotations
-from typing import List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
+from typing import Dict, List, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
-__all__ = ["AssistantCreateParams", "ResponseValidationConfig", "Tool", "ToolCodexV0Tool", "ToolNoOpTool"]
+__all__ = ["AssistantCreateParams", "HardCodedQuery"]
class AssistantCreateParams(TypedDict, total=False):
@@ -19,9 +19,13 @@ class AssistantCreateParams(TypedDict, total=False):
codex_access_key: Optional[str]
+ codex_as_cache: bool
+
context_limit: Optional[int]
"""The maximum number of context chunks to include in a run."""
+ hard_coded_queries: Optional[Iterable[HardCodedQuery]]
+
instructions: Optional[str]
logo_s3_key: Optional[str]
@@ -32,33 +36,20 @@ class AssistantCreateParams(TypedDict, total=False):
model: Optional[Literal["gpt-4o"]]
- response_validation_config: Optional[Iterable[ResponseValidationConfig]]
-
suggested_questions: List[str]
"""A list of suggested questions that can be asked to the assistant"""
- tools: Optional[Iterable[Tool]]
-
url_slug: Optional[str]
"""Optional URL suffix - unique identifier for the assistant's endpoint"""
-class ResponseValidationConfig(TypedDict, total=False):
- is_bad_threshold: Required[float]
-
- name: Required[
- Literal["trustworthiness", "response_helpfulness", "context_sufficiency", "response_groundedness", "query_ease"]
- ]
-
-
-class ToolCodexV0Tool(TypedDict, total=False):
- access_key: Required[str]
-
- type: Literal["codex_v0"]
+class HardCodedQuery(TypedDict, total=False):
+ query: Required[str]
+ response: Required[str]
-class ToolNoOpTool(TypedDict, total=False):
- type: Literal["noop"]
+ context: Optional[List[str]]
+ messages: Optional[Iterable[Dict[str, object]]]
-Tool: TypeAlias = Union[ToolCodexV0Tool, ToolNoOpTool]
+ prompt: Optional[str]
diff --git a/src/agility/types/assistant_list_response.py b/src/agility/types/assistant_list_response.py
index d263ea0..241608f 100644
--- a/src/agility/types/assistant_list_response.py
+++ b/src/agility/types/assistant_list_response.py
@@ -1,33 +1,24 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
+from typing import Dict, List, Optional
from datetime import datetime
-from typing_extensions import Literal, TypeAlias
+from typing_extensions import Literal
from .._models import BaseModel
-__all__ = ["AssistantListResponse", "ResponseValidationConfig", "Tool", "ToolCodexV0Tool", "ToolNoOpTool"]
+__all__ = ["AssistantListResponse", "HardCodedQuery"]
-class ResponseValidationConfig(BaseModel):
- is_bad_threshold: float
+class HardCodedQuery(BaseModel):
+ query: str
- name: Literal[
- "trustworthiness", "response_helpfulness", "context_sufficiency", "response_groundedness", "query_ease"
- ]
+ response: str
+ context: Optional[List[str]] = None
-class ToolCodexV0Tool(BaseModel):
- access_key: str
+ messages: Optional[List[Dict[str, object]]] = None
- type: Optional[Literal["codex_v0"]] = None
-
-
-class ToolNoOpTool(BaseModel):
- type: Optional[Literal["noop"]] = None
-
-
-Tool: TypeAlias = Union[ToolCodexV0Tool, ToolNoOpTool]
+ prompt: Optional[str] = None
class AssistantListResponse(BaseModel):
@@ -51,9 +42,13 @@ class AssistantListResponse(BaseModel):
codex_access_key: Optional[str] = None
+ codex_as_cache: Optional[bool] = None
+
context_limit: Optional[int] = None
"""The maximum number of context chunks to include in a run."""
+ hard_coded_queries: Optional[List[HardCodedQuery]] = None
+
instructions: Optional[str] = None
logo_s3_key: Optional[str] = None
@@ -64,12 +59,8 @@ class AssistantListResponse(BaseModel):
model: Optional[Literal["gpt-4o"]] = None
- response_validation_config: Optional[List[ResponseValidationConfig]] = None
-
suggested_questions: Optional[List[str]] = None
"""A list of suggested questions that can be asked to the assistant"""
- tools: Optional[List[Tool]] = None
-
url_slug: Optional[str] = None
"""Optional URL suffix - unique identifier for the assistant's endpoint"""
diff --git a/src/agility/types/assistant_retrieve_run_metadata_response.py b/src/agility/types/assistant_retrieve_run_metadata_response.py
new file mode 100644
index 0000000..9a69a17
--- /dev/null
+++ b/src/agility/types/assistant_retrieve_run_metadata_response.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Dict, List, Optional
+
+from .._models import BaseModel
+
+__all__ = ["AssistantRetrieveRunMetadataResponse"]
+
+
+class AssistantRetrieveRunMetadataResponse(BaseModel):
+ query: str
+
+ response: str
+
+ context: Optional[List[str]] = None
+
+ messages: Optional[List[Dict[str, object]]] = None
+
+ prompt: Optional[str] = None
diff --git a/src/agility/types/assistant_update_params.py b/src/agility/types/assistant_update_params.py
index dba55cd..f780669 100644
--- a/src/agility/types/assistant_update_params.py
+++ b/src/agility/types/assistant_update_params.py
@@ -2,10 +2,10 @@
from __future__ import annotations
-from typing import List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
+from typing import Dict, List, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
-__all__ = ["AssistantUpdateParams", "ResponseValidationConfig", "Tool", "ToolCodexV0Tool", "ToolNoOpTool"]
+__all__ = ["AssistantUpdateParams", "HardCodedQuery"]
class AssistantUpdateParams(TypedDict, total=False):
@@ -21,9 +21,13 @@ class AssistantUpdateParams(TypedDict, total=False):
codex_access_key: Optional[str]
+ codex_as_cache: bool
+
context_limit: Optional[int]
"""The maximum number of context chunks to include in a run."""
+ hard_coded_queries: Optional[Iterable[HardCodedQuery]]
+
instructions: Optional[str]
logo_s3_key: Optional[str]
@@ -34,33 +38,20 @@ class AssistantUpdateParams(TypedDict, total=False):
model: Optional[Literal["gpt-4o"]]
- response_validation_config: Optional[Iterable[ResponseValidationConfig]]
-
suggested_questions: List[str]
"""A list of suggested questions that can be asked to the assistant"""
- tools: Optional[Iterable[Tool]]
-
url_slug: Optional[str]
"""Optional URL suffix - unique identifier for the assistant's endpoint"""
-class ResponseValidationConfig(TypedDict, total=False):
- is_bad_threshold: Required[float]
-
- name: Required[
- Literal["trustworthiness", "response_helpfulness", "context_sufficiency", "response_groundedness", "query_ease"]
- ]
-
-
-class ToolCodexV0Tool(TypedDict, total=False):
- access_key: Required[str]
-
- type: Literal["codex_v0"]
+class HardCodedQuery(TypedDict, total=False):
+ query: Required[str]
+ response: Required[str]
-class ToolNoOpTool(TypedDict, total=False):
- type: Literal["noop"]
+ context: Optional[List[str]]
+ messages: Optional[Iterable[Dict[str, object]]]
-Tool: TypeAlias = Union[ToolCodexV0Tool, ToolNoOpTool]
+ prompt: Optional[str]
diff --git a/src/agility/types/assistant_with_config.py b/src/agility/types/assistant_with_config.py
index 740a401..d6ee0b4 100644
--- a/src/agility/types/assistant_with_config.py
+++ b/src/agility/types/assistant_with_config.py
@@ -1,33 +1,24 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
+from typing import Dict, List, Optional
from datetime import datetime
-from typing_extensions import Literal, TypeAlias
+from typing_extensions import Literal
from .._models import BaseModel
-__all__ = ["AssistantWithConfig", "ResponseValidationConfig", "Tool", "ToolCodexV0Tool", "ToolNoOpTool"]
+__all__ = ["AssistantWithConfig", "HardCodedQuery"]
-class ResponseValidationConfig(BaseModel):
- is_bad_threshold: float
+class HardCodedQuery(BaseModel):
+ query: str
- name: Literal[
- "trustworthiness", "response_helpfulness", "context_sufficiency", "response_groundedness", "query_ease"
- ]
+ response: str
+ context: Optional[List[str]] = None
-class ToolCodexV0Tool(BaseModel):
- access_key: str
+ messages: Optional[List[Dict[str, object]]] = None
- type: Optional[Literal["codex_v0"]] = None
-
-
-class ToolNoOpTool(BaseModel):
- type: Optional[Literal["noop"]] = None
-
-
-Tool: TypeAlias = Union[ToolCodexV0Tool, ToolNoOpTool]
+ prompt: Optional[str] = None
class AssistantWithConfig(BaseModel):
@@ -49,9 +40,13 @@ class AssistantWithConfig(BaseModel):
codex_access_key: Optional[str] = None
+ codex_as_cache: Optional[bool] = None
+
context_limit: Optional[int] = None
"""The maximum number of context chunks to include in a run."""
+ hard_coded_queries: Optional[List[HardCodedQuery]] = None
+
instructions: Optional[str] = None
logo_s3_key: Optional[str] = None
@@ -62,12 +57,8 @@ class AssistantWithConfig(BaseModel):
model: Optional[Literal["gpt-4o"]] = None
- response_validation_config: Optional[List[ResponseValidationConfig]] = None
-
suggested_questions: Optional[List[str]] = None
"""A list of suggested questions that can be asked to the assistant"""
- tools: Optional[List[Tool]] = None
-
url_slug: Optional[str] = None
"""Optional URL suffix - unique identifier for the assistant's endpoint"""
diff --git a/src/agility/types/knowledge_bases/source.py b/src/agility/types/knowledge_bases/source.py
index 77bb40e..37c0c17 100644
--- a/src/agility/types/knowledge_bases/source.py
+++ b/src/agility/types/knowledge_bases/source.py
@@ -80,6 +80,8 @@ class SourceParamsNotionV0Params(BaseModel):
limit: Optional[int] = None
+ max_age_days: Optional[int] = None
+
name: Optional[Literal["notion_v0"]] = None
diff --git a/src/agility/types/knowledge_bases/source_create_params.py b/src/agility/types/knowledge_bases/source_create_params.py
index 5908821..2ffdf15 100644
--- a/src/agility/types/knowledge_bases/source_create_params.py
+++ b/src/agility/types/knowledge_bases/source_create_params.py
@@ -87,6 +87,8 @@ class SourceParamsNotionV0Params(TypedDict, total=False):
limit: Optional[int]
+ max_age_days: int
+
name: Literal["notion_v0"]
diff --git a/src/agility/types/knowledge_bases/source_update_params.py b/src/agility/types/knowledge_bases/source_update_params.py
index 9d582e0..dbaf14a 100644
--- a/src/agility/types/knowledge_bases/source_update_params.py
+++ b/src/agility/types/knowledge_bases/source_update_params.py
@@ -89,6 +89,8 @@ class SourceParamsNotionV0Params(TypedDict, total=False):
limit: Optional[int]
+ max_age_days: int
+
name: Literal["notion_v0"]
diff --git a/src/agility/types/threads/message.py b/src/agility/types/threads/message.py
index 21cd6e2..56b5474 100644
--- a/src/agility/types/threads/message.py
+++ b/src/agility/types/threads/message.py
@@ -1,108 +1,46 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Optional
+from typing import Dict, List, Optional
from datetime import datetime
from typing_extensions import Literal
from ..._models import BaseModel
-__all__ = [
- "Message",
- "Metadata",
- "MetadataScores",
- "MetadataScoresContextSufficiency",
- "MetadataScoresContextSufficiencyLog",
- "MetadataScoresQueryEase",
- "MetadataScoresQueryEaseLog",
- "MetadataScoresResponseGroundedness",
- "MetadataScoresResponseGroundednessLog",
- "MetadataScoresResponseHelpfulness",
- "MetadataScoresResponseHelpfulnessLog",
- "MetadataScoresTrustworthiness",
- "MetadataScoresTrustworthinessLog",
-]
-
-
-class MetadataScoresContextSufficiencyLog(BaseModel):
- explanation: Optional[str] = None
-
-
-class MetadataScoresContextSufficiency(BaseModel):
- is_bad: Optional[bool] = None
-
- log: Optional[MetadataScoresContextSufficiencyLog] = None
-
- score: Optional[float] = None
-
-
-class MetadataScoresQueryEaseLog(BaseModel):
- explanation: Optional[str] = None
-
-
-class MetadataScoresQueryEase(BaseModel):
- is_bad: Optional[bool] = None
-
- log: Optional[MetadataScoresQueryEaseLog] = None
-
- score: Optional[float] = None
-
-
-class MetadataScoresResponseGroundednessLog(BaseModel):
- explanation: Optional[str] = None
-
-
-class MetadataScoresResponseGroundedness(BaseModel):
- is_bad: Optional[bool] = None
-
- log: Optional[MetadataScoresResponseGroundednessLog] = None
-
- score: Optional[float] = None
+__all__ = ["Message", "Metadata", "MetadataScores", "MetadataScoresLog"]
-class MetadataScoresResponseHelpfulnessLog(BaseModel):
+class MetadataScoresLog(BaseModel):
explanation: Optional[str] = None
-class MetadataScoresResponseHelpfulness(BaseModel):
- is_bad: Optional[bool] = None
-
- log: Optional[MetadataScoresResponseHelpfulnessLog] = None
-
- score: Optional[float] = None
-
-
-class MetadataScoresTrustworthinessLog(BaseModel):
- explanation: Optional[str] = None
-
-
-class MetadataScoresTrustworthiness(BaseModel):
+class MetadataScores(BaseModel):
is_bad: Optional[bool] = None
- log: Optional[MetadataScoresTrustworthinessLog] = None
+ log: Optional[MetadataScoresLog] = None
score: Optional[float] = None
+ triggered: Optional[bool] = None
-class MetadataScores(BaseModel):
- context_sufficiency: Optional[MetadataScoresContextSufficiency] = None
-
- query_ease: Optional[MetadataScoresQueryEase] = None
-
- response_groundedness: Optional[MetadataScoresResponseGroundedness] = None
-
- response_helpfulness: Optional[MetadataScoresResponseHelpfulness] = None
+ triggered_escalation: Optional[bool] = None
- trustworthiness: Optional[MetadataScoresTrustworthiness] = None
+ triggered_guardrail: Optional[bool] = None
class Metadata(BaseModel):
citations: Optional[List[str]] = None
+ escalated_to_sme: Optional[bool] = None
+
+ guardrailed: Optional[bool] = None
+
is_bad_response: Optional[bool] = None
is_expert_answer: Optional[bool] = None
- scores: Optional[MetadataScores] = None
+ original_llm_response: Optional[str] = None
+
+ scores: Optional[Dict[str, MetadataScores]] = None
trustworthiness_explanation: Optional[str] = None
diff --git a/src/agility/types/threads/message_create_params.py b/src/agility/types/threads/message_create_params.py
index e4848ad..ae995c2 100644
--- a/src/agility/types/threads/message_create_params.py
+++ b/src/agility/types/threads/message_create_params.py
@@ -2,24 +2,10 @@
from __future__ import annotations
-from typing import List, Optional
+from typing import Dict, List, Optional
from typing_extensions import Literal, Required, TypedDict
-__all__ = [
- "MessageCreateParams",
- "Metadata",
- "MetadataScores",
- "MetadataScoresContextSufficiency",
- "MetadataScoresContextSufficiencyLog",
- "MetadataScoresQueryEase",
- "MetadataScoresQueryEaseLog",
- "MetadataScoresResponseGroundedness",
- "MetadataScoresResponseGroundednessLog",
- "MetadataScoresResponseHelpfulness",
- "MetadataScoresResponseHelpfulnessLog",
- "MetadataScoresTrustworthiness",
- "MetadataScoresTrustworthinessLog",
-]
+__all__ = ["MessageCreateParams", "Metadata", "MetadataScores", "MetadataScoresLog"]
class MessageCreateParams(TypedDict, total=False):
@@ -30,86 +16,38 @@ class MessageCreateParams(TypedDict, total=False):
role: Required[Literal["user", "assistant"]]
-class MetadataScoresContextSufficiencyLog(TypedDict, total=False):
+class MetadataScoresLog(TypedDict, total=False):
explanation: Optional[str]
-class MetadataScoresContextSufficiency(TypedDict, total=False):
- is_bad: Optional[bool]
-
- log: Optional[MetadataScoresContextSufficiencyLog]
-
- score: Optional[float]
-
-
-class MetadataScoresQueryEaseLog(TypedDict, total=False):
- explanation: Optional[str]
-
-
-class MetadataScoresQueryEase(TypedDict, total=False):
- is_bad: Optional[bool]
-
- log: Optional[MetadataScoresQueryEaseLog]
-
- score: Optional[float]
-
-
-class MetadataScoresResponseGroundednessLog(TypedDict, total=False):
- explanation: Optional[str]
-
-
-class MetadataScoresResponseGroundedness(TypedDict, total=False):
- is_bad: Optional[bool]
-
- log: Optional[MetadataScoresResponseGroundednessLog]
-
- score: Optional[float]
-
-
-class MetadataScoresResponseHelpfulnessLog(TypedDict, total=False):
- explanation: Optional[str]
-
-
-class MetadataScoresResponseHelpfulness(TypedDict, total=False):
- is_bad: Optional[bool]
-
- log: Optional[MetadataScoresResponseHelpfulnessLog]
-
- score: Optional[float]
-
-
-class MetadataScoresTrustworthinessLog(TypedDict, total=False):
- explanation: Optional[str]
-
-
-class MetadataScoresTrustworthiness(TypedDict, total=False):
+class MetadataScores(TypedDict, total=False):
is_bad: Optional[bool]
- log: Optional[MetadataScoresTrustworthinessLog]
+ log: Optional[MetadataScoresLog]
score: Optional[float]
+ triggered: Optional[bool]
-class MetadataScores(TypedDict, total=False):
- context_sufficiency: Optional[MetadataScoresContextSufficiency]
-
- query_ease: Optional[MetadataScoresQueryEase]
+ triggered_escalation: Optional[bool]
- response_groundedness: Optional[MetadataScoresResponseGroundedness]
-
- response_helpfulness: Optional[MetadataScoresResponseHelpfulness]
-
- trustworthiness: Optional[MetadataScoresTrustworthiness]
+ triggered_guardrail: Optional[bool]
class Metadata(TypedDict, total=False):
citations: Optional[List[str]]
+ escalated_to_sme: Optional[bool]
+
+ guardrailed: Optional[bool]
+
is_bad_response: Optional[bool]
is_expert_answer: Optional[bool]
- scores: Optional[MetadataScores]
+ original_llm_response: Optional[str]
+
+ scores: Optional[Dict[str, MetadataScores]]
trustworthiness_explanation: Optional[str]
diff --git a/src/agility/types/threads/run.py b/src/agility/types/threads/run.py
index 55a43c1..caa6942 100644
--- a/src/agility/types/threads/run.py
+++ b/src/agility/types/threads/run.py
@@ -1,33 +1,24 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import List, Union, Optional
+from typing import Dict, List, Optional
from datetime import datetime
-from typing_extensions import Literal, TypeAlias
+from typing_extensions import Literal
from ..._models import BaseModel
-__all__ = ["Run", "ResponseValidationConfig", "Tool", "ToolCodexV0Tool", "ToolNoOpTool", "Usage"]
+__all__ = ["Run", "HardCodedQuery", "Usage"]
-class ResponseValidationConfig(BaseModel):
- is_bad_threshold: float
+class HardCodedQuery(BaseModel):
+ query: str
- name: Literal[
- "trustworthiness", "response_helpfulness", "context_sufficiency", "response_groundedness", "query_ease"
- ]
+ response: str
+ context: Optional[List[str]] = None
-class ToolCodexV0Tool(BaseModel):
- access_key: str
+ messages: Optional[List[Dict[str, object]]] = None
- type: Optional[Literal["codex_v0"]] = None
-
-
-class ToolNoOpTool(BaseModel):
- type: Optional[Literal["noop"]] = None
-
-
-Tool: TypeAlias = Union[ToolCodexV0Tool, ToolNoOpTool]
+ prompt: Optional[str] = None
class Usage(BaseModel):
@@ -55,11 +46,15 @@ class Run(BaseModel):
codex_access_key: Optional[str] = None
+ codex_as_cache: Optional[bool] = None
+
context_limit: Optional[int] = None
"""The maximum number of context chunks to include."""
deleted_at: Optional[datetime] = None
+ hard_coded_queries: Optional[List[HardCodedQuery]] = None
+
instructions: Optional[str] = None
knowledge_base_id: Optional[str] = None
@@ -68,8 +63,4 @@ class Run(BaseModel):
model: Optional[Literal["gpt-4o"]] = None
- response_validation_config: Optional[List[ResponseValidationConfig]] = None
-
- tools: Optional[List[Tool]] = None
-
usage: Optional[Usage] = None
diff --git a/src/agility/types/threads/run_create_params.py b/src/agility/types/threads/run_create_params.py
index e0f23da..8d0cc27 100644
--- a/src/agility/types/threads/run_create_params.py
+++ b/src/agility/types/threads/run_create_params.py
@@ -2,28 +2,16 @@
from __future__ import annotations
-from typing import List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
+from typing import Dict, List, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
__all__ = [
"RunCreateParams",
"AdditionalMessage",
"AdditionalMessageMetadata",
"AdditionalMessageMetadataScores",
- "AdditionalMessageMetadataScoresContextSufficiency",
- "AdditionalMessageMetadataScoresContextSufficiencyLog",
- "AdditionalMessageMetadataScoresQueryEase",
- "AdditionalMessageMetadataScoresQueryEaseLog",
- "AdditionalMessageMetadataScoresResponseGroundedness",
- "AdditionalMessageMetadataScoresResponseGroundednessLog",
- "AdditionalMessageMetadataScoresResponseHelpfulness",
- "AdditionalMessageMetadataScoresResponseHelpfulnessLog",
- "AdditionalMessageMetadataScoresTrustworthiness",
- "AdditionalMessageMetadataScoresTrustworthinessLog",
- "ResponseValidationConfig",
- "Tool",
- "ToolCodexV0Tool",
- "ToolNoOpTool",
+ "AdditionalMessageMetadataScoresLog",
+ "HardCodedQuery",
]
@@ -36,100 +24,52 @@ class RunCreateParams(TypedDict, total=False):
codex_access_key: Optional[str]
+ codex_as_cache: Optional[bool]
+
context_limit: Optional[int]
"""The maximum number of context chunks to include."""
+ hard_coded_queries: Optional[Iterable[HardCodedQuery]]
+
instructions: Optional[str]
knowledge_base_id: Optional[str]
model: Optional[Literal["gpt-4o"]]
- response_validation_config: Optional[Iterable[ResponseValidationConfig]]
-
- tools: Optional[Iterable[Tool]]
-
-
-class AdditionalMessageMetadataScoresContextSufficiencyLog(TypedDict, total=False):
- explanation: Optional[str]
-
-
-class AdditionalMessageMetadataScoresContextSufficiency(TypedDict, total=False):
- is_bad: Optional[bool]
-
- log: Optional[AdditionalMessageMetadataScoresContextSufficiencyLog]
-
- score: Optional[float]
-
-
-class AdditionalMessageMetadataScoresQueryEaseLog(TypedDict, total=False):
- explanation: Optional[str]
-
-
-class AdditionalMessageMetadataScoresQueryEase(TypedDict, total=False):
- is_bad: Optional[bool]
-
- log: Optional[AdditionalMessageMetadataScoresQueryEaseLog]
-
- score: Optional[float]
-
-
-class AdditionalMessageMetadataScoresResponseGroundednessLog(TypedDict, total=False):
- explanation: Optional[str]
-
-
-class AdditionalMessageMetadataScoresResponseGroundedness(TypedDict, total=False):
- is_bad: Optional[bool]
-
- log: Optional[AdditionalMessageMetadataScoresResponseGroundednessLog]
-
- score: Optional[float]
-
-
-class AdditionalMessageMetadataScoresResponseHelpfulnessLog(TypedDict, total=False):
- explanation: Optional[str]
-
-
-class AdditionalMessageMetadataScoresResponseHelpfulness(TypedDict, total=False):
- is_bad: Optional[bool]
-
- log: Optional[AdditionalMessageMetadataScoresResponseHelpfulnessLog]
-
- score: Optional[float]
-
-class AdditionalMessageMetadataScoresTrustworthinessLog(TypedDict, total=False):
+class AdditionalMessageMetadataScoresLog(TypedDict, total=False):
explanation: Optional[str]
-class AdditionalMessageMetadataScoresTrustworthiness(TypedDict, total=False):
+class AdditionalMessageMetadataScores(TypedDict, total=False):
is_bad: Optional[bool]
- log: Optional[AdditionalMessageMetadataScoresTrustworthinessLog]
+ log: Optional[AdditionalMessageMetadataScoresLog]
score: Optional[float]
+ triggered: Optional[bool]
-class AdditionalMessageMetadataScores(TypedDict, total=False):
- context_sufficiency: Optional[AdditionalMessageMetadataScoresContextSufficiency]
-
- query_ease: Optional[AdditionalMessageMetadataScoresQueryEase]
+ triggered_escalation: Optional[bool]
- response_groundedness: Optional[AdditionalMessageMetadataScoresResponseGroundedness]
-
- response_helpfulness: Optional[AdditionalMessageMetadataScoresResponseHelpfulness]
-
- trustworthiness: Optional[AdditionalMessageMetadataScoresTrustworthiness]
+ triggered_guardrail: Optional[bool]
class AdditionalMessageMetadata(TypedDict, total=False):
citations: Optional[List[str]]
+ escalated_to_sme: Optional[bool]
+
+ guardrailed: Optional[bool]
+
is_bad_response: Optional[bool]
is_expert_answer: Optional[bool]
- scores: Optional[AdditionalMessageMetadataScores]
+ original_llm_response: Optional[str]
+
+ scores: Optional[Dict[str, AdditionalMessageMetadataScores]]
trustworthiness_explanation: Optional[str]
@@ -146,22 +86,13 @@ class AdditionalMessage(TypedDict, total=False):
thread_id: Required[str]
-class ResponseValidationConfig(TypedDict, total=False):
- is_bad_threshold: Required[float]
-
- name: Required[
- Literal["trustworthiness", "response_helpfulness", "context_sufficiency", "response_groundedness", "query_ease"]
- ]
-
-
-class ToolCodexV0Tool(TypedDict, total=False):
- access_key: Required[str]
-
- type: Literal["codex_v0"]
+class HardCodedQuery(TypedDict, total=False):
+ query: Required[str]
+ response: Required[str]
-class ToolNoOpTool(TypedDict, total=False):
- type: Literal["noop"]
+ context: Optional[List[str]]
+ messages: Optional[Iterable[Dict[str, object]]]
-Tool: TypeAlias = Union[ToolCodexV0Tool, ToolNoOpTool]
+ prompt: Optional[str]
diff --git a/src/agility/types/threads/run_stream_params.py b/src/agility/types/threads/run_stream_params.py
index b4036ca..b5ae16c 100644
--- a/src/agility/types/threads/run_stream_params.py
+++ b/src/agility/types/threads/run_stream_params.py
@@ -2,28 +2,16 @@
from __future__ import annotations
-from typing import List, Union, Iterable, Optional
-from typing_extensions import Literal, Required, TypeAlias, TypedDict
+from typing import Dict, List, Iterable, Optional
+from typing_extensions import Literal, Required, TypedDict
__all__ = [
"RunStreamParams",
"AdditionalMessage",
"AdditionalMessageMetadata",
"AdditionalMessageMetadataScores",
- "AdditionalMessageMetadataScoresContextSufficiency",
- "AdditionalMessageMetadataScoresContextSufficiencyLog",
- "AdditionalMessageMetadataScoresQueryEase",
- "AdditionalMessageMetadataScoresQueryEaseLog",
- "AdditionalMessageMetadataScoresResponseGroundedness",
- "AdditionalMessageMetadataScoresResponseGroundednessLog",
- "AdditionalMessageMetadataScoresResponseHelpfulness",
- "AdditionalMessageMetadataScoresResponseHelpfulnessLog",
- "AdditionalMessageMetadataScoresTrustworthiness",
- "AdditionalMessageMetadataScoresTrustworthinessLog",
- "ResponseValidationConfig",
- "Tool",
- "ToolCodexV0Tool",
- "ToolNoOpTool",
+ "AdditionalMessageMetadataScoresLog",
+ "HardCodedQuery",
]
@@ -36,100 +24,52 @@ class RunStreamParams(TypedDict, total=False):
codex_access_key: Optional[str]
+ codex_as_cache: Optional[bool]
+
context_limit: Optional[int]
"""The maximum number of context chunks to include."""
+ hard_coded_queries: Optional[Iterable[HardCodedQuery]]
+
instructions: Optional[str]
knowledge_base_id: Optional[str]
model: Optional[Literal["gpt-4o"]]
- response_validation_config: Optional[Iterable[ResponseValidationConfig]]
-
- tools: Optional[Iterable[Tool]]
-
-
-class AdditionalMessageMetadataScoresContextSufficiencyLog(TypedDict, total=False):
- explanation: Optional[str]
-
-
-class AdditionalMessageMetadataScoresContextSufficiency(TypedDict, total=False):
- is_bad: Optional[bool]
-
- log: Optional[AdditionalMessageMetadataScoresContextSufficiencyLog]
-
- score: Optional[float]
-
-
-class AdditionalMessageMetadataScoresQueryEaseLog(TypedDict, total=False):
- explanation: Optional[str]
-
-
-class AdditionalMessageMetadataScoresQueryEase(TypedDict, total=False):
- is_bad: Optional[bool]
-
- log: Optional[AdditionalMessageMetadataScoresQueryEaseLog]
-
- score: Optional[float]
-
-
-class AdditionalMessageMetadataScoresResponseGroundednessLog(TypedDict, total=False):
- explanation: Optional[str]
-
-
-class AdditionalMessageMetadataScoresResponseGroundedness(TypedDict, total=False):
- is_bad: Optional[bool]
-
- log: Optional[AdditionalMessageMetadataScoresResponseGroundednessLog]
-
- score: Optional[float]
-
-
-class AdditionalMessageMetadataScoresResponseHelpfulnessLog(TypedDict, total=False):
- explanation: Optional[str]
-
-
-class AdditionalMessageMetadataScoresResponseHelpfulness(TypedDict, total=False):
- is_bad: Optional[bool]
-
- log: Optional[AdditionalMessageMetadataScoresResponseHelpfulnessLog]
-
- score: Optional[float]
-
-class AdditionalMessageMetadataScoresTrustworthinessLog(TypedDict, total=False):
+class AdditionalMessageMetadataScoresLog(TypedDict, total=False):
explanation: Optional[str]
-class AdditionalMessageMetadataScoresTrustworthiness(TypedDict, total=False):
+class AdditionalMessageMetadataScores(TypedDict, total=False):
is_bad: Optional[bool]
- log: Optional[AdditionalMessageMetadataScoresTrustworthinessLog]
+ log: Optional[AdditionalMessageMetadataScoresLog]
score: Optional[float]
+ triggered: Optional[bool]
-class AdditionalMessageMetadataScores(TypedDict, total=False):
- context_sufficiency: Optional[AdditionalMessageMetadataScoresContextSufficiency]
-
- query_ease: Optional[AdditionalMessageMetadataScoresQueryEase]
+ triggered_escalation: Optional[bool]
- response_groundedness: Optional[AdditionalMessageMetadataScoresResponseGroundedness]
-
- response_helpfulness: Optional[AdditionalMessageMetadataScoresResponseHelpfulness]
-
- trustworthiness: Optional[AdditionalMessageMetadataScoresTrustworthiness]
+ triggered_guardrail: Optional[bool]
class AdditionalMessageMetadata(TypedDict, total=False):
citations: Optional[List[str]]
+ escalated_to_sme: Optional[bool]
+
+ guardrailed: Optional[bool]
+
is_bad_response: Optional[bool]
is_expert_answer: Optional[bool]
- scores: Optional[AdditionalMessageMetadataScores]
+ original_llm_response: Optional[str]
+
+ scores: Optional[Dict[str, AdditionalMessageMetadataScores]]
trustworthiness_explanation: Optional[str]
@@ -146,22 +86,13 @@ class AdditionalMessage(TypedDict, total=False):
thread_id: Required[str]
-class ResponseValidationConfig(TypedDict, total=False):
- is_bad_threshold: Required[float]
-
- name: Required[
- Literal["trustworthiness", "response_helpfulness", "context_sufficiency", "response_groundedness", "query_ease"]
- ]
-
-
-class ToolCodexV0Tool(TypedDict, total=False):
- access_key: Required[str]
-
- type: Literal["codex_v0"]
+class HardCodedQuery(TypedDict, total=False):
+ query: Required[str]
+ response: Required[str]
-class ToolNoOpTool(TypedDict, total=False):
- type: Literal["noop"]
+ context: Optional[List[str]]
+ messages: Optional[Iterable[Dict[str, object]]]
-Tool: TypeAlias = Union[ToolCodexV0Tool, ToolNoOpTool]
+ prompt: Optional[str]
diff --git a/tests/api_resources/assistants/test_access_keys.py b/tests/api_resources/assistants/test_access_keys.py
index 00774cb..0a9f6ea 100644
--- a/tests/api_resources/assistants/test_access_keys.py
+++ b/tests/api_resources/assistants/test_access_keys.py
@@ -120,7 +120,9 @@ def test_path_params_list(self, client: Agility) -> None:
class TestAsyncAccessKeys:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
@parametrize
async def test_method_create(self, async_client: AsyncAgility) -> None:
diff --git a/tests/api_resources/integrations/test_available.py b/tests/api_resources/integrations/test_available.py
index 36a89f0..5e250a9 100644
--- a/tests/api_resources/integrations/test_available.py
+++ b/tests/api_resources/integrations/test_available.py
@@ -44,7 +44,9 @@ def test_streaming_response_list(self, client: Agility) -> None:
class TestAsyncAvailable:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
@parametrize
async def test_method_list(self, async_client: AsyncAgility) -> None:
diff --git a/tests/api_resources/integrations/test_rbac.py b/tests/api_resources/integrations/test_rbac.py
index bb00ede..79a9e80 100644
--- a/tests/api_resources/integrations/test_rbac.py
+++ b/tests/api_resources/integrations/test_rbac.py
@@ -57,7 +57,9 @@ def test_path_params_verify(self, client: Agility) -> None:
class TestAsyncRbac:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
@parametrize
async def test_method_verify(self, async_client: AsyncAgility) -> None:
diff --git a/tests/api_resources/knowledge_bases/sources/test_documents.py b/tests/api_resources/knowledge_bases/sources/test_documents.py
index 507f61a..a84493d 100644
--- a/tests/api_resources/knowledge_bases/sources/test_documents.py
+++ b/tests/api_resources/knowledge_bases/sources/test_documents.py
@@ -138,7 +138,9 @@ def test_path_params_list(self, client: Agility) -> None:
class TestAsyncDocuments:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
@parametrize
async def test_method_retrieve(self, async_client: AsyncAgility) -> None:
diff --git a/tests/api_resources/knowledge_bases/test_sources.py b/tests/api_resources/knowledge_bases/test_sources.py
index eb1ba6c..6b20e92 100644
--- a/tests/api_resources/knowledge_bases/test_sources.py
+++ b/tests/api_resources/knowledge_bases/test_sources.py
@@ -499,7 +499,9 @@ def test_path_params_sync(self, client: Agility) -> None:
class TestAsyncSources:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
@parametrize
async def test_method_create(self, async_client: AsyncAgility) -> None:
diff --git a/tests/api_resources/test_assistants.py b/tests/api_resources/test_assistants.py
index 72441df..421cd2d 100644
--- a/tests/api_resources/test_assistants.py
+++ b/tests/api_resources/test_assistants.py
@@ -13,6 +13,7 @@
Assistant,
AssistantWithConfig,
AssistantListResponse,
+ AssistantRetrieveRunMetadataResponse,
)
from agility.pagination import SyncMyOffsetPage, AsyncMyOffsetPage
@@ -38,24 +39,22 @@ def test_method_create_with_all_params(self, client: Agility) -> None:
knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
name="name",
codex_access_key="codex_access_key",
+ codex_as_cache=True,
context_limit=1,
+ hard_coded_queries=[
+ {
+ "query": "query",
+ "response": "response",
+ "context": ["string"],
+ "messages": [{"foo": "bar"}],
+ "prompt": "prompt",
+ }
+ ],
instructions="instructions",
logo_s3_key="logo_s3_key",
logo_text="logo_text",
model="gpt-4o",
- response_validation_config=[
- {
- "is_bad_threshold": 0,
- "name": "trustworthiness",
- }
- ],
suggested_questions=["string"],
- tools=[
- {
- "access_key": "access_key",
- "type": "codex_v0",
- }
- ],
url_slug="url_slug",
)
assert_matches_type(Assistant, assistant, path=["response"])
@@ -146,24 +145,22 @@ def test_method_update_with_all_params(self, client: Agility) -> None:
knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
name="name",
codex_access_key="codex_access_key",
+ codex_as_cache=True,
context_limit=1,
+ hard_coded_queries=[
+ {
+ "query": "query",
+ "response": "response",
+ "context": ["string"],
+ "messages": [{"foo": "bar"}],
+ "prompt": "prompt",
+ }
+ ],
instructions="instructions",
logo_s3_key="logo_s3_key",
logo_text="logo_text",
model="gpt-4o",
- response_validation_config=[
- {
- "is_bad_threshold": 0,
- "name": "trustworthiness",
- }
- ],
suggested_questions=["string"],
- tools=[
- {
- "access_key": "access_key",
- "type": "codex_v0",
- }
- ],
url_slug="url_slug",
)
assert_matches_type(AssistantWithConfig, assistant, path=["response"])
@@ -282,9 +279,59 @@ def test_path_params_delete(self, client: Agility) -> None:
"",
)
+ @parametrize
+ def test_method_retrieve_run_metadata(self, client: Agility) -> None:
+ assistant = client.assistants.retrieve_run_metadata(
+ run_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ assistant_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(AssistantRetrieveRunMetadataResponse, assistant, path=["response"])
+
+ @parametrize
+ def test_raw_response_retrieve_run_metadata(self, client: Agility) -> None:
+ response = client.assistants.with_raw_response.retrieve_run_metadata(
+ run_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ assistant_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ assistant = response.parse()
+ assert_matches_type(AssistantRetrieveRunMetadataResponse, assistant, path=["response"])
+
+ @parametrize
+ def test_streaming_response_retrieve_run_metadata(self, client: Agility) -> None:
+ with client.assistants.with_streaming_response.retrieve_run_metadata(
+ run_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ assistant_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ assistant = response.parse()
+ assert_matches_type(AssistantRetrieveRunMetadataResponse, assistant, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ def test_path_params_retrieve_run_metadata(self, client: Agility) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
+ client.assistants.with_raw_response.retrieve_run_metadata(
+ run_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ assistant_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ client.assistants.with_raw_response.retrieve_run_metadata(
+ run_id="",
+ assistant_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
class TestAsyncAssistants:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
@parametrize
async def test_method_create(self, async_client: AsyncAgility) -> None:
@@ -302,24 +349,22 @@ async def test_method_create_with_all_params(self, async_client: AsyncAgility) -
knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
name="name",
codex_access_key="codex_access_key",
+ codex_as_cache=True,
context_limit=1,
+ hard_coded_queries=[
+ {
+ "query": "query",
+ "response": "response",
+ "context": ["string"],
+ "messages": [{"foo": "bar"}],
+ "prompt": "prompt",
+ }
+ ],
instructions="instructions",
logo_s3_key="logo_s3_key",
logo_text="logo_text",
model="gpt-4o",
- response_validation_config=[
- {
- "is_bad_threshold": 0,
- "name": "trustworthiness",
- }
- ],
suggested_questions=["string"],
- tools=[
- {
- "access_key": "access_key",
- "type": "codex_v0",
- }
- ],
url_slug="url_slug",
)
assert_matches_type(Assistant, assistant, path=["response"])
@@ -410,24 +455,22 @@ async def test_method_update_with_all_params(self, async_client: AsyncAgility) -
knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
name="name",
codex_access_key="codex_access_key",
+ codex_as_cache=True,
context_limit=1,
+ hard_coded_queries=[
+ {
+ "query": "query",
+ "response": "response",
+ "context": ["string"],
+ "messages": [{"foo": "bar"}],
+ "prompt": "prompt",
+ }
+ ],
instructions="instructions",
logo_s3_key="logo_s3_key",
logo_text="logo_text",
model="gpt-4o",
- response_validation_config=[
- {
- "is_bad_threshold": 0,
- "name": "trustworthiness",
- }
- ],
suggested_questions=["string"],
- tools=[
- {
- "access_key": "access_key",
- "type": "codex_v0",
- }
- ],
url_slug="url_slug",
)
assert_matches_type(AssistantWithConfig, assistant, path=["response"])
@@ -545,3 +588,51 @@ async def test_path_params_delete(self, async_client: AsyncAgility) -> None:
await async_client.assistants.with_raw_response.delete(
"",
)
+
+ @parametrize
+ async def test_method_retrieve_run_metadata(self, async_client: AsyncAgility) -> None:
+ assistant = await async_client.assistants.retrieve_run_metadata(
+ run_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ assistant_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+ assert_matches_type(AssistantRetrieveRunMetadataResponse, assistant, path=["response"])
+
+ @parametrize
+ async def test_raw_response_retrieve_run_metadata(self, async_client: AsyncAgility) -> None:
+ response = await async_client.assistants.with_raw_response.retrieve_run_metadata(
+ run_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ assistant_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ assistant = await response.parse()
+ assert_matches_type(AssistantRetrieveRunMetadataResponse, assistant, path=["response"])
+
+ @parametrize
+ async def test_streaming_response_retrieve_run_metadata(self, async_client: AsyncAgility) -> None:
+ async with async_client.assistants.with_streaming_response.retrieve_run_metadata(
+ run_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ assistant_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ assistant = await response.parse()
+ assert_matches_type(AssistantRetrieveRunMetadataResponse, assistant, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @parametrize
+ async def test_path_params_retrieve_run_metadata(self, async_client: AsyncAgility) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `assistant_id` but received ''"):
+ await async_client.assistants.with_raw_response.retrieve_run_metadata(
+ run_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ assistant_id="",
+ )
+
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `run_id` but received ''"):
+ await async_client.assistants.with_raw_response.retrieve_run_metadata(
+ run_id="",
+ assistant_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ )
diff --git a/tests/api_resources/test_integrations.py b/tests/api_resources/test_integrations.py
index 82d4400..8245159 100644
--- a/tests/api_resources/test_integrations.py
+++ b/tests/api_resources/test_integrations.py
@@ -197,7 +197,9 @@ def test_path_params_delete(self, client: Agility) -> None:
class TestAsyncIntegrations:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
@parametrize
async def test_method_create(self, async_client: AsyncAgility) -> None:
diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py
index 1f7410f..8b85392 100644
--- a/tests/api_resources/test_knowledge_bases.py
+++ b/tests/api_resources/test_knowledge_bases.py
@@ -254,7 +254,9 @@ def test_path_params_delete(self, client: Agility) -> None:
class TestAsyncKnowledgeBases:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
@parametrize
async def test_method_create(self, async_client: AsyncAgility) -> None:
diff --git a/tests/api_resources/test_threads.py b/tests/api_resources/test_threads.py
index c83c69a..223d1d2 100644
--- a/tests/api_resources/test_threads.py
+++ b/tests/api_resources/test_threads.py
@@ -154,7 +154,9 @@ def test_path_params_delete(self, client: Agility) -> None:
class TestAsyncThreads:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
@parametrize
async def test_method_create(self, async_client: AsyncAgility) -> None:
diff --git a/tests/api_resources/test_users.py b/tests/api_resources/test_users.py
index bfcbc6e..bd09bf2 100644
--- a/tests/api_resources/test_users.py
+++ b/tests/api_resources/test_users.py
@@ -57,7 +57,9 @@ def test_path_params_retrieve(self, client: Agility) -> None:
class TestAsyncUsers:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
@parametrize
async def test_method_retrieve(self, async_client: AsyncAgility) -> None:
diff --git a/tests/api_resources/threads/test_messages.py b/tests/api_resources/threads/test_messages.py
index 9182e92..dd0e8be 100644
--- a/tests/api_resources/threads/test_messages.py
+++ b/tests/api_resources/threads/test_messages.py
@@ -35,34 +35,20 @@ def test_method_create_with_all_params(self, client: Agility) -> None:
content="content",
metadata={
"citations": ["string"],
+ "escalated_to_sme": True,
+ "guardrailed": True,
"is_bad_response": True,
"is_expert_answer": True,
+ "original_llm_response": "original_llm_response",
"scores": {
- "context_sufficiency": {
+ "foo": {
"is_bad": True,
"log": {"explanation": "explanation"},
"score": 0,
- },
- "query_ease": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "response_groundedness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "response_helpfulness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "trustworthiness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
+ "triggered": True,
+ "triggered_escalation": True,
+ "triggered_guardrail": True,
+ }
},
"trustworthiness_explanation": "trustworthiness_explanation",
"trustworthiness_score": 0,
@@ -256,7 +242,9 @@ def test_path_params_delete(self, client: Agility) -> None:
class TestAsyncMessages:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
@parametrize
async def test_method_create(self, async_client: AsyncAgility) -> None:
@@ -275,34 +263,20 @@ async def test_method_create_with_all_params(self, async_client: AsyncAgility) -
content="content",
metadata={
"citations": ["string"],
+ "escalated_to_sme": True,
+ "guardrailed": True,
"is_bad_response": True,
"is_expert_answer": True,
+ "original_llm_response": "original_llm_response",
"scores": {
- "context_sufficiency": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "query_ease": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "response_groundedness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "response_helpfulness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "trustworthiness": {
+ "foo": {
"is_bad": True,
"log": {"explanation": "explanation"},
"score": 0,
- },
+ "triggered": True,
+ "triggered_escalation": True,
+ "triggered_guardrail": True,
+ }
},
"trustworthiness_explanation": "trustworthiness_explanation",
"trustworthiness_score": 0,
diff --git a/tests/api_resources/threads/test_runs.py b/tests/api_resources/threads/test_runs.py
index cc18da6..7968e2f 100644
--- a/tests/api_resources/threads/test_runs.py
+++ b/tests/api_resources/threads/test_runs.py
@@ -36,34 +36,20 @@ def test_method_create_with_all_params(self, client: Agility) -> None:
"content": "content",
"metadata": {
"citations": ["string"],
+ "escalated_to_sme": True,
+ "guardrailed": True,
"is_bad_response": True,
"is_expert_answer": True,
+ "original_llm_response": "original_llm_response",
"scores": {
- "context_sufficiency": {
+ "foo": {
"is_bad": True,
"log": {"explanation": "explanation"},
"score": 0,
- },
- "query_ease": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "response_groundedness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "response_helpfulness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "trustworthiness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
+ "triggered": True,
+ "triggered_escalation": True,
+ "triggered_guardrail": True,
+ }
},
"trustworthiness_explanation": "trustworthiness_explanation",
"trustworthiness_score": 0,
@@ -73,22 +59,20 @@ def test_method_create_with_all_params(self, client: Agility) -> None:
}
],
codex_access_key="codex_access_key",
+ codex_as_cache=True,
context_limit=1,
- instructions="instructions",
- knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- model="gpt-4o",
- response_validation_config=[
- {
- "is_bad_threshold": 0,
- "name": "trustworthiness",
- }
- ],
- tools=[
+ hard_coded_queries=[
{
- "access_key": "access_key",
- "type": "codex_v0",
+ "query": "query",
+ "response": "response",
+ "context": ["string"],
+ "messages": [{"foo": "bar"}],
+ "prompt": "prompt",
}
],
+ instructions="instructions",
+ knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ model="gpt-4o",
)
assert_matches_type(Run, run, path=["response"])
@@ -241,34 +225,20 @@ def test_method_stream_with_all_params(self, client: Agility) -> None:
"content": "content",
"metadata": {
"citations": ["string"],
+ "escalated_to_sme": True,
+ "guardrailed": True,
"is_bad_response": True,
"is_expert_answer": True,
+ "original_llm_response": "original_llm_response",
"scores": {
- "context_sufficiency": {
+ "foo": {
"is_bad": True,
"log": {"explanation": "explanation"},
"score": 0,
- },
- "query_ease": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "response_groundedness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "response_helpfulness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "trustworthiness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
+ "triggered": True,
+ "triggered_escalation": True,
+ "triggered_guardrail": True,
+ }
},
"trustworthiness_explanation": "trustworthiness_explanation",
"trustworthiness_score": 0,
@@ -278,22 +248,20 @@ def test_method_stream_with_all_params(self, client: Agility) -> None:
}
],
codex_access_key="codex_access_key",
+ codex_as_cache=True,
context_limit=1,
- instructions="instructions",
- knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- model="gpt-4o",
- response_validation_config=[
- {
- "is_bad_threshold": 0,
- "name": "trustworthiness",
- }
- ],
- tools=[
+ hard_coded_queries=[
{
- "access_key": "access_key",
- "type": "codex_v0",
+ "query": "query",
+ "response": "response",
+ "context": ["string"],
+ "messages": [{"foo": "bar"}],
+ "prompt": "prompt",
}
],
+ instructions="instructions",
+ knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ model="gpt-4o",
)
assert_matches_type(object, run, path=["response"])
@@ -333,7 +301,9 @@ def test_path_params_stream(self, client: Agility) -> None:
class TestAsyncRuns:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
@parametrize
async def test_method_create(self, async_client: AsyncAgility) -> None:
@@ -354,34 +324,20 @@ async def test_method_create_with_all_params(self, async_client: AsyncAgility) -
"content": "content",
"metadata": {
"citations": ["string"],
+ "escalated_to_sme": True,
+ "guardrailed": True,
"is_bad_response": True,
"is_expert_answer": True,
+ "original_llm_response": "original_llm_response",
"scores": {
- "context_sufficiency": {
+ "foo": {
"is_bad": True,
"log": {"explanation": "explanation"},
"score": 0,
- },
- "query_ease": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "response_groundedness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "response_helpfulness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "trustworthiness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
+ "triggered": True,
+ "triggered_escalation": True,
+ "triggered_guardrail": True,
+ }
},
"trustworthiness_explanation": "trustworthiness_explanation",
"trustworthiness_score": 0,
@@ -391,22 +347,20 @@ async def test_method_create_with_all_params(self, async_client: AsyncAgility) -
}
],
codex_access_key="codex_access_key",
+ codex_as_cache=True,
context_limit=1,
- instructions="instructions",
- knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- model="gpt-4o",
- response_validation_config=[
- {
- "is_bad_threshold": 0,
- "name": "trustworthiness",
- }
- ],
- tools=[
+ hard_coded_queries=[
{
- "access_key": "access_key",
- "type": "codex_v0",
+ "query": "query",
+ "response": "response",
+ "context": ["string"],
+ "messages": [{"foo": "bar"}],
+ "prompt": "prompt",
}
],
+ instructions="instructions",
+ knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ model="gpt-4o",
)
assert_matches_type(Run, run, path=["response"])
@@ -559,34 +513,20 @@ async def test_method_stream_with_all_params(self, async_client: AsyncAgility) -
"content": "content",
"metadata": {
"citations": ["string"],
+ "escalated_to_sme": True,
+ "guardrailed": True,
"is_bad_response": True,
"is_expert_answer": True,
+ "original_llm_response": "original_llm_response",
"scores": {
- "context_sufficiency": {
+ "foo": {
"is_bad": True,
"log": {"explanation": "explanation"},
"score": 0,
- },
- "query_ease": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "response_groundedness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "response_helpfulness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
- "trustworthiness": {
- "is_bad": True,
- "log": {"explanation": "explanation"},
- "score": 0,
- },
+ "triggered": True,
+ "triggered_escalation": True,
+ "triggered_guardrail": True,
+ }
},
"trustworthiness_explanation": "trustworthiness_explanation",
"trustworthiness_score": 0,
@@ -596,22 +536,20 @@ async def test_method_stream_with_all_params(self, async_client: AsyncAgility) -
}
],
codex_access_key="codex_access_key",
+ codex_as_cache=True,
context_limit=1,
- instructions="instructions",
- knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- model="gpt-4o",
- response_validation_config=[
- {
- "is_bad_threshold": 0,
- "name": "trustworthiness",
- }
- ],
- tools=[
+ hard_coded_queries=[
{
- "access_key": "access_key",
- "type": "codex_v0",
+ "query": "query",
+ "response": "response",
+ "context": ["string"],
+ "messages": [{"foo": "bar"}],
+ "prompt": "prompt",
}
],
+ instructions="instructions",
+ knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
+ model="gpt-4o",
)
assert_matches_type(object, run, path=["response"])
diff --git a/tests/api_resources/users/test_api_key.py b/tests/api_resources/users/test_api_key.py
index 6440582..0cfacb1 100644
--- a/tests/api_resources/users/test_api_key.py
+++ b/tests/api_resources/users/test_api_key.py
@@ -95,7 +95,9 @@ def test_path_params_refresh(self, client: Agility) -> None:
class TestAsyncAPIKey:
- parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
@parametrize
async def test_method_retrieve(self, async_client: AsyncAgility) -> None:
diff --git a/tests/conftest.py b/tests/conftest.py
index 8b10331..0c65545 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,16 +1,20 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
from __future__ import annotations
import os
import logging
from typing import TYPE_CHECKING, Iterator, AsyncIterator
+import httpx
import pytest
from pytest_asyncio import is_async_test
-from agility import Agility, AsyncAgility
+from agility import Agility, AsyncAgility, DefaultAioHttpClient
+from agility._utils import is_dict
if TYPE_CHECKING:
- from _pytest.fixtures import FixtureRequest
+ from _pytest.fixtures import FixtureRequest # pyright: ignore[reportPrivateImportUsage]
pytest.register_assert_rewrite("tests.utils")
@@ -25,6 +29,19 @@ def pytest_collection_modifyitems(items: list[pytest.Function]) -> None:
for async_test in pytest_asyncio_tests:
async_test.add_marker(session_scope_marker, append=False)
+ # We skip tests that use both the aiohttp client and respx_mock as respx_mock
+ # doesn't support custom transports.
+ for item in items:
+ if "async_client" not in item.fixturenames or "respx_mock" not in item.fixturenames:
+ continue
+
+ if not hasattr(item, "callspec"):
+ continue
+
+ async_client_param = item.callspec.params.get("async_client")
+ if is_dict(async_client_param) and async_client_param.get("http_client") == "aiohttp":
+ item.add_marker(pytest.mark.skip(reason="aiohttp client is not compatible with respx_mock"))
+
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -43,9 +60,25 @@ def client(request: FixtureRequest) -> Iterator[Agility]:
@pytest.fixture(scope="session")
async def async_client(request: FixtureRequest) -> AsyncIterator[AsyncAgility]:
- strict = getattr(request, "param", True)
- if not isinstance(strict, bool):
- raise TypeError(f"Unexpected fixture parameter type {type(strict)}, expected {bool}")
-
- async with AsyncAgility(base_url=base_url, api_key=api_key, _strict_response_validation=strict) as client:
+ param = getattr(request, "param", True)
+
+ # defaults
+ strict = True
+ http_client: None | httpx.AsyncClient = None
+
+ if isinstance(param, bool):
+ strict = param
+ elif is_dict(param):
+ strict = param.get("strict", True)
+ assert isinstance(strict, bool)
+
+ http_client_type = param.get("http_client", "httpx")
+ if http_client_type == "aiohttp":
+ http_client = DefaultAioHttpClient()
+ else:
+ raise TypeError(f"Unexpected fixture parameter type {type(param)}, expected bool or dict")
+
+ async with AsyncAgility(
+ base_url=base_url, api_key=api_key, _strict_response_validation=strict, http_client=http_client
+ ) as client:
yield client
diff --git a/tests/test_client.py b/tests/test_client.py
index 68bc2b7..dc1aeab 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -23,17 +23,16 @@
from agility import Agility, AsyncAgility, APIResponseValidationError
from agility._types import Omit
-from agility._utils import maybe_transform
from agility._models import BaseModel, FinalRequestOptions
-from agility._constants import RAW_RESPONSE_HEADER
from agility._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError
from agility._base_client import (
DEFAULT_TIMEOUT,
HTTPX_DEFAULT_TIMEOUT,
BaseClient,
+ DefaultHttpxClient,
+ DefaultAsyncHttpxClient,
make_request_options,
)
-from agility.types.assistant_create_params import AssistantCreateParams
from .utils import update_env
@@ -192,6 +191,7 @@ def test_copy_signature(self) -> None:
copy_param = copy_signature.parameters.get(name)
assert copy_param is not None, f"copy() signature is missing the {name} param"
+ @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12")
def test_copy_build_request(self) -> None:
options = FinalRequestOptions(method="get", url="/foo")
@@ -452,7 +452,7 @@ def test_request_extra_query(self) -> None:
def test_multipart_repeating_array(self, client: Agility) -> None:
request = client._build_request(
FinalRequestOptions.construct(
- method="get",
+ method="post",
url="/foo",
headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"},
json_data={"array": ["foo", "bar"]},
@@ -709,52 +709,25 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str
@mock.patch("agility._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
- def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
+ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: Agility) -> None:
respx_mock.post("/api/assistants/").mock(side_effect=httpx.TimeoutException("Test timeout error"))
with pytest.raises(APITimeoutError):
- self.client.post(
- "/api/assistants/",
- body=cast(
- object,
- maybe_transform(
- dict(
- description="description",
- knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- name="name",
- ),
- AssistantCreateParams,
- ),
- ),
- cast_to=httpx.Response,
- options={"headers": {RAW_RESPONSE_HEADER: "stream"}},
- )
+ client.assistants.with_streaming_response.create(
+ description="description", knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", name="name"
+ ).__enter__()
assert _get_open_connections(self.client) == 0
@mock.patch("agility._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
- def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
+ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: Agility) -> None:
respx_mock.post("/api/assistants/").mock(return_value=httpx.Response(500))
with pytest.raises(APIStatusError):
- self.client.post(
- "/api/assistants/",
- body=cast(
- object,
- maybe_transform(
- dict(
- description="description",
- knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- name="name",
- ),
- AssistantCreateParams,
- ),
- ),
- cast_to=httpx.Response,
- options={"headers": {RAW_RESPONSE_HEADER: "stream"}},
- )
-
+ client.assistants.with_streaming_response.create(
+ description="description", knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", name="name"
+ ).__enter__()
assert _get_open_connections(self.client) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
@@ -846,6 +819,55 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
assert response.http_request.headers.get("x-stainless-retry-count") == "42"
+ def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
+ # Test that the proxy environment variables are set correctly
+ monkeypatch.setenv("HTTPS_PROXY", "https://example.org")
+
+ client = DefaultHttpxClient()
+
+ mounts = tuple(client._mounts.items())
+ assert len(mounts) == 1
+ assert mounts[0][0].pattern == "https://"
+
+ @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning")
+ def test_default_client_creation(self) -> None:
+ # Ensure that the client can be initialized without any exceptions
+ DefaultHttpxClient(
+ verify=True,
+ cert=None,
+ trust_env=True,
+ http1=True,
+ http2=False,
+ limits=httpx.Limits(max_connections=100, max_keepalive_connections=20),
+ )
+
+ @pytest.mark.respx(base_url=base_url)
+ def test_follow_redirects(self, respx_mock: MockRouter) -> None:
+ # Test that the default follow_redirects=True allows following redirects
+ respx_mock.post("/redirect").mock(
+ return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ )
+ respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"}))
+
+ response = self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response)
+ assert response.status_code == 200
+ assert response.json() == {"status": "ok"}
+
+ @pytest.mark.respx(base_url=base_url)
+ def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None:
+ # Test that follow_redirects=False prevents following redirects
+ respx_mock.post("/redirect").mock(
+ return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ )
+
+ with pytest.raises(APIStatusError) as exc_info:
+ self.client.post(
+ "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response
+ )
+
+ assert exc_info.value.response.status_code == 302
+ assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected"
+
class TestAsyncAgility:
client = AsyncAgility(base_url=base_url, api_key=api_key, _strict_response_validation=True)
@@ -982,6 +1004,7 @@ def test_copy_signature(self) -> None:
copy_param = copy_signature.parameters.get(name)
assert copy_param is not None, f"copy() signature is missing the {name} param"
+ @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12")
def test_copy_build_request(self) -> None:
options = FinalRequestOptions(method="get", url="/foo")
@@ -1244,7 +1267,7 @@ def test_request_extra_query(self) -> None:
def test_multipart_repeating_array(self, async_client: AsyncAgility) -> None:
request = async_client._build_request(
FinalRequestOptions.construct(
- method="get",
+ method="post",
url="/foo",
headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"},
json_data={"array": ["foo", "bar"]},
@@ -1517,52 +1540,27 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte
@mock.patch("agility._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
- async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
+ async def test_retrying_timeout_errors_doesnt_leak(
+ self, respx_mock: MockRouter, async_client: AsyncAgility
+ ) -> None:
respx_mock.post("/api/assistants/").mock(side_effect=httpx.TimeoutException("Test timeout error"))
with pytest.raises(APITimeoutError):
- await self.client.post(
- "/api/assistants/",
- body=cast(
- object,
- maybe_transform(
- dict(
- description="description",
- knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- name="name",
- ),
- AssistantCreateParams,
- ),
- ),
- cast_to=httpx.Response,
- options={"headers": {RAW_RESPONSE_HEADER: "stream"}},
- )
+ await async_client.assistants.with_streaming_response.create(
+ description="description", knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", name="name"
+ ).__aenter__()
assert _get_open_connections(self.client) == 0
@mock.patch("agility._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@pytest.mark.respx(base_url=base_url)
- async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter) -> None:
+ async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, async_client: AsyncAgility) -> None:
respx_mock.post("/api/assistants/").mock(return_value=httpx.Response(500))
with pytest.raises(APIStatusError):
- await self.client.post(
- "/api/assistants/",
- body=cast(
- object,
- maybe_transform(
- dict(
- description="description",
- knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
- name="name",
- ),
- AssistantCreateParams,
- ),
- ),
- cast_to=httpx.Response,
- options={"headers": {RAW_RESPONSE_HEADER: "stream"}},
- )
-
+ await async_client.assistants.with_streaming_response.create(
+ description="description", knowledge_base_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", name="name"
+ ).__aenter__()
assert _get_open_connections(self.client) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
@@ -1701,3 +1699,52 @@ async def test_main() -> None:
raise AssertionError("calling get_platform using asyncify resulted in a hung process")
time.sleep(0.1)
+
+ async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
+ # Test that the proxy environment variables are set correctly
+ monkeypatch.setenv("HTTPS_PROXY", "https://example.org")
+
+ client = DefaultAsyncHttpxClient()
+
+ mounts = tuple(client._mounts.items())
+ assert len(mounts) == 1
+ assert mounts[0][0].pattern == "https://"
+
+ @pytest.mark.filterwarnings("ignore:.*deprecated.*:DeprecationWarning")
+ async def test_default_client_creation(self) -> None:
+ # Ensure that the client can be initialized without any exceptions
+ DefaultAsyncHttpxClient(
+ verify=True,
+ cert=None,
+ trust_env=True,
+ http1=True,
+ http2=False,
+ limits=httpx.Limits(max_connections=100, max_keepalive_connections=20),
+ )
+
+ @pytest.mark.respx(base_url=base_url)
+ async def test_follow_redirects(self, respx_mock: MockRouter) -> None:
+ # Test that the default follow_redirects=True allows following redirects
+ respx_mock.post("/redirect").mock(
+ return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ )
+ respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"}))
+
+ response = await self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response)
+ assert response.status_code == 200
+ assert response.json() == {"status": "ok"}
+
+ @pytest.mark.respx(base_url=base_url)
+ async def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None:
+ # Test that follow_redirects=False prevents following redirects
+ respx_mock.post("/redirect").mock(
+ return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ )
+
+ with pytest.raises(APIStatusError) as exc_info:
+ await self.client.post(
+ "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response
+ )
+
+ assert exc_info.value.response.status_code == 302
+ assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected"
diff --git a/tests/test_models.py b/tests/test_models.py
index 51b9989..4f636a4 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -1,5 +1,5 @@
import json
-from typing import Any, Dict, List, Union, Optional, cast
+from typing import TYPE_CHECKING, Any, Dict, List, Union, Optional, cast
from datetime import datetime, timezone
from typing_extensions import Literal, Annotated, TypeAliasType
@@ -492,12 +492,15 @@ class Model(BaseModel):
resource_id: Optional[str] = None
m = Model.construct()
+ assert m.resource_id is None
assert "resource_id" not in m.model_fields_set
m = Model.construct(resource_id=None)
+ assert m.resource_id is None
assert "resource_id" in m.model_fields_set
m = Model.construct(resource_id="foo")
+ assert m.resource_id == "foo"
assert "resource_id" in m.model_fields_set
@@ -832,7 +835,7 @@ class B(BaseModel):
@pytest.mark.skipif(not PYDANTIC_V2, reason="TypeAliasType is not supported in Pydantic v1")
def test_type_alias_type() -> None:
- Alias = TypeAliasType("Alias", str)
+ Alias = TypeAliasType("Alias", str) # pyright: ignore
class Model(BaseModel):
alias: Alias
@@ -886,3 +889,75 @@ class ModelB(BaseModel):
)
assert isinstance(m, ModelB)
+
+
+def test_nested_discriminated_union() -> None:
+ class InnerType1(BaseModel):
+ type: Literal["type_1"]
+
+ class InnerModel(BaseModel):
+ inner_value: str
+
+ class InnerType2(BaseModel):
+ type: Literal["type_2"]
+ some_inner_model: InnerModel
+
+ class Type1(BaseModel):
+ base_type: Literal["base_type_1"]
+ value: Annotated[
+ Union[
+ InnerType1,
+ InnerType2,
+ ],
+ PropertyInfo(discriminator="type"),
+ ]
+
+ class Type2(BaseModel):
+ base_type: Literal["base_type_2"]
+
+ T = Annotated[
+ Union[
+ Type1,
+ Type2,
+ ],
+ PropertyInfo(discriminator="base_type"),
+ ]
+
+ model = construct_type(
+ type_=T,
+ value={
+ "base_type": "base_type_1",
+ "value": {
+ "type": "type_2",
+ },
+ },
+ )
+ assert isinstance(model, Type1)
+ assert isinstance(model.value, InnerType2)
+
+
+@pytest.mark.skipif(not PYDANTIC_V2, reason="this is only supported in pydantic v2 for now")
+def test_extra_properties() -> None:
+ class Item(BaseModel):
+ prop: int
+
+ class Model(BaseModel):
+ __pydantic_extra__: Dict[str, Item] = Field(init=False) # pyright: ignore[reportIncompatibleVariableOverride]
+
+ other: str
+
+ if TYPE_CHECKING:
+
+ def __getattr__(self, attr: str) -> Item: ...
+
+ model = construct_type(
+ type_=Model,
+ value={
+ "a": {"prop": 1},
+ "other": "foo",
+ },
+ )
+ assert isinstance(model, Model)
+ assert model.a.prop == 1
+ assert isinstance(model.a, Item)
+ assert model.other == "foo"
diff --git a/tests/test_utils/test_proxy.py b/tests/test_utils/test_proxy.py
index 8da4dab..e2dc0cd 100644
--- a/tests/test_utils/test_proxy.py
+++ b/tests/test_utils/test_proxy.py
@@ -21,3 +21,14 @@ def test_recursive_proxy() -> None:
assert dir(proxy) == []
assert type(proxy).__name__ == "RecursiveLazyProxy"
assert type(operator.attrgetter("name.foo.bar.baz")(proxy)).__name__ == "RecursiveLazyProxy"
+
+
+def test_isinstance_does_not_error() -> None:
+ class AlwaysErrorProxy(LazyProxy[Any]):
+ @override
+ def __load__(self) -> Any:
+ raise RuntimeError("Mocking missing dependency")
+
+ proxy = AlwaysErrorProxy()
+ assert not isinstance(proxy, dict)
+ assert isinstance(proxy, LazyProxy)