diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 315f7d30..7657c56b 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.1.0-alpha.26"
+ ".": "0.1.0-alpha.27"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index 386ca3e4..feb65cbf 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,3 +1,3 @@
-configured_endpoints: 56
-openapi_spec_hash: 12260ab88069ff15d254606e041debfb
-config_hash: 6c3ad84d97bf1d0989ad2ec0cae64078
+configured_endpoints: 54
+openapi_spec_hash: f263c6c6d8d75a8f7c1e9c65188e7ef2
+config_hash: 04312af86542d1127f09d3f3cbe5bb50
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 03ab7e85..d237111e 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,24 @@
# Changelog
+## 0.1.0-alpha.27 (2025-09-19)
+
+Full Changelog: [v0.1.0-alpha.26...v0.1.0-alpha.27](https://github.com/cleanlab/codex-python/compare/v0.1.0-alpha.26...v0.1.0-alpha.27)
+
+### Features
+
+* **api:** api update ([7075aca](https://github.com/cleanlab/codex-python/commit/7075aca8381929be65bd7d0310a501620bbede90))
+* **api:** api update ([54beb02](https://github.com/cleanlab/codex-python/commit/54beb025117a30e6b0e56986bde48457e6539d01))
+* **api:** api update ([16ec26b](https://github.com/cleanlab/codex-python/commit/16ec26b53edf9724bf78f1267a641eb711bdc3e5))
+* **api:** api update ([9a65cae](https://github.com/cleanlab/codex-python/commit/9a65cae3b351709a98eb4bc8f695e91f397c4994))
+* **api:** api update ([7ccf252](https://github.com/cleanlab/codex-python/commit/7ccf252ba391da0a565e0fd1dd33cc9223a1eb9a))
+
+
+### Chores
+
+* **internal:** update pydantic dependency ([2c0bd75](https://github.com/cleanlab/codex-python/commit/2c0bd7546c46ad96707d948d34f60df28c701697))
+* **tests:** simplify `get_platform` test ([1c03b05](https://github.com/cleanlab/codex-python/commit/1c03b059628ef850cde4760950bc4e7a4d830015))
+* **types:** change optional parameter type from NotGiven to Omit ([5cd7ee0](https://github.com/cleanlab/codex-python/commit/5cd7ee0987fe4e29292e3e9c738fe2bd5945d008))
+
## 0.1.0-alpha.26 (2025-09-05)
Full Changelog: [v0.1.0-alpha.25...v0.1.0-alpha.26](https://github.com/cleanlab/codex-python/compare/v0.1.0-alpha.25...v0.1.0-alpha.26)
diff --git a/api.md b/api.md
index a78c4fc4..dc60687a 100644
--- a/api.md
+++ b/api.md
@@ -252,16 +252,3 @@ Methods:
- client.projects.remediations.pause(remediation_id, \*, project_id) -> RemediationPauseResponse
- client.projects.remediations.publish(remediation_id, \*, project_id) -> RemediationPublishResponse
- client.projects.remediations.unpause(remediation_id, \*, project_id) -> RemediationUnpauseResponse
-
-# Tlm
-
-Types:
-
-```python
-from codex.types import TlmPromptResponse, TlmScoreResponse
-```
-
-Methods:
-
-- client.tlm.prompt(\*\*params) -> TlmPromptResponse
-- client.tlm.score(\*\*params) -> TlmScoreResponse
diff --git a/pyproject.toml b/pyproject.toml
index e09e8bc2..214e6aa5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "codex-sdk"
-version = "0.1.0-alpha.26"
+version = "0.1.0-alpha.27"
description = "Internal SDK used within cleanlab-codex package. Refer to https://pypi.org/project/cleanlab-codex/ instead."
dynamic = ["readme"]
license = "MIT"
@@ -56,7 +56,6 @@ dev-dependencies = [
"dirty-equals>=0.6.0",
"importlib-metadata>=6.7.0",
"rich>=13.7.1",
- "nest_asyncio==1.6.0",
"pytest-xdist>=3.6.1",
]
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 7999ff41..2aaac36f 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -75,7 +75,6 @@ multidict==6.4.4
mypy==1.14.1
mypy-extensions==1.0.0
# via mypy
-nest-asyncio==1.6.0
nodeenv==1.8.0
# via pyright
nox==2023.4.22
@@ -89,9 +88,9 @@ pluggy==1.5.0
propcache==0.3.1
# via aiohttp
# via yarl
-pydantic==2.10.3
+pydantic==2.11.9
# via codex-sdk
-pydantic-core==2.27.1
+pydantic-core==2.33.2
# via pydantic
pygments==2.18.0
# via rich
@@ -127,6 +126,9 @@ typing-extensions==4.12.2
# via pydantic
# via pydantic-core
# via pyright
+ # via typing-inspection
+typing-inspection==0.4.1
+ # via pydantic
virtualenv==20.24.5
# via nox
yarl==1.20.0
diff --git a/requirements.lock b/requirements.lock
index bde9133e..a0182743 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -55,9 +55,9 @@ multidict==6.4.4
propcache==0.3.1
# via aiohttp
# via yarl
-pydantic==2.10.3
+pydantic==2.11.9
# via codex-sdk
-pydantic-core==2.27.1
+pydantic-core==2.33.2
# via pydantic
sniffio==1.3.0
# via anyio
@@ -68,5 +68,8 @@ typing-extensions==4.12.2
# via multidict
# via pydantic
# via pydantic-core
+ # via typing-inspection
+typing-inspection==0.4.1
+ # via pydantic
yarl==1.20.0
# via aiohttp
diff --git a/src/codex/__init__.py b/src/codex/__init__.py
index 2b07cd12..373dabd4 100644
--- a/src/codex/__init__.py
+++ b/src/codex/__init__.py
@@ -3,7 +3,7 @@
import typing as _t
from . import types
-from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes
+from ._types import NOT_GIVEN, Omit, NoneType, NotGiven, Transport, ProxiesTypes, omit, not_given
from ._utils import file_from_path
from ._client import (
ENVIRONMENTS,
@@ -49,7 +49,9 @@
"ProxiesTypes",
"NotGiven",
"NOT_GIVEN",
+ "not_given",
"Omit",
+ "omit",
"CodexError",
"APIError",
"APIStatusError",
diff --git a/src/codex/_base_client.py b/src/codex/_base_client.py
index e424fb76..e6febf3a 100644
--- a/src/codex/_base_client.py
+++ b/src/codex/_base_client.py
@@ -42,7 +42,6 @@
from ._qs import Querystring
from ._files import to_httpx_files, async_to_httpx_files
from ._types import (
- NOT_GIVEN,
Body,
Omit,
Query,
@@ -57,6 +56,7 @@
RequestOptions,
HttpxRequestFiles,
ModelBuilderProtocol,
+ not_given,
)
from ._utils import is_dict, is_list, asyncify, is_given, lru_cache, is_mapping
from ._compat import PYDANTIC_V1, model_copy, model_dump
@@ -145,9 +145,9 @@ def __init__(
def __init__(
self,
*,
- url: URL | NotGiven = NOT_GIVEN,
- json: Body | NotGiven = NOT_GIVEN,
- params: Query | NotGiven = NOT_GIVEN,
+ url: URL | NotGiven = not_given,
+ json: Body | NotGiven = not_given,
+ params: Query | NotGiven = not_given,
) -> None:
self.url = url
self.json = json
@@ -595,7 +595,7 @@ def _maybe_override_cast_to(self, cast_to: type[ResponseT], options: FinalReques
# we internally support defining a temporary header to override the
# default `cast_to` type for use with `.with_raw_response` and `.with_streaming_response`
# see _response.py for implementation details
- override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, NOT_GIVEN)
+ override_cast_to = headers.pop(OVERRIDE_CAST_TO_HEADER, not_given)
if is_given(override_cast_to):
options.headers = headers
return cast(Type[ResponseT], override_cast_to)
@@ -825,7 +825,7 @@ def __init__(
version: str,
base_url: str | URL,
max_retries: int = DEFAULT_MAX_RETRIES,
- timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | Timeout | None | NotGiven = not_given,
http_client: httpx.Client | None = None,
custom_headers: Mapping[str, str] | None = None,
custom_query: Mapping[str, object] | None = None,
@@ -1356,7 +1356,7 @@ def __init__(
base_url: str | URL,
_strict_response_validation: bool,
max_retries: int = DEFAULT_MAX_RETRIES,
- timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | Timeout | None | NotGiven = not_given,
http_client: httpx.AsyncClient | None = None,
custom_headers: Mapping[str, str] | None = None,
custom_query: Mapping[str, object] | None = None,
@@ -1818,8 +1818,8 @@ def make_request_options(
extra_query: Query | None = None,
extra_body: Body | None = None,
idempotency_key: str | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- post_parser: PostParser | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ post_parser: PostParser | NotGiven = not_given,
) -> RequestOptions:
"""Create a dict of type RequestOptions without keys of NotGiven values."""
options: RequestOptions = {}
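The sentinel default is what makes this filtering work: anything still equal to `not_given` was never supplied and can be skipped, while an explicit `None` survives. A standalone sketch of that pattern (the function and names here are illustrative, not the SDK's internals):

```python
from codex._types import NotGiven, not_given

def build_options(
    *,
    timeout: float | None | NotGiven = not_given,
    idempotency_key: str | None = None,
) -> dict[str, object]:
    # Only keys the caller actually provided end up in the dict; a sentinel
    # default means "absent", while an explicit None is a real value.
    options: dict[str, object] = {}
    if not isinstance(timeout, NotGiven):
        options["timeout"] = timeout
    if idempotency_key is not None:
        options["idempotency_key"] = idempotency_key
    return options

assert build_options() == {}
assert build_options(timeout=None) == {"timeout": None}
assert build_options(timeout=2.5) == {"timeout": 2.5}
```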
diff --git a/src/codex/_client.py b/src/codex/_client.py
index 352d0dbc..308ce9ae 100644
--- a/src/codex/_client.py
+++ b/src/codex/_client.py
@@ -3,7 +3,7 @@
from __future__ import annotations
import os
-from typing import Any, Dict, Union, Mapping, cast
+from typing import Any, Dict, Mapping, cast
from typing_extensions import Self, Literal, override
import httpx
@@ -11,7 +11,6 @@
from . import _exceptions
from ._qs import Querystring
from ._types import (
- NOT_GIVEN,
Omit,
Headers,
Timeout,
@@ -19,10 +18,11 @@
Transport,
ProxiesTypes,
RequestOptions,
+ not_given,
)
from ._utils import is_given, get_async_library
from ._version import __version__
-from .resources import tlm, health
+from .resources import health
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
from ._exceptions import APIStatusError
from ._base_client import (
@@ -58,7 +58,6 @@ class Codex(SyncAPIClient):
organizations: organizations.OrganizationsResource
users: users.UsersResource
projects: projects.ProjectsResource
- tlm: tlm.TlmResource
with_raw_response: CodexWithRawResponse
with_streaming_response: CodexWithStreamedResponse
@@ -75,9 +74,9 @@ def __init__(
auth_token: str | None = None,
api_key: str | None = None,
access_key: str | None = None,
- environment: Literal["production", "staging", "local"] | NotGiven = NOT_GIVEN,
- base_url: str | httpx.URL | None | NotGiven = NOT_GIVEN,
- timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN,
+ environment: Literal["production", "staging", "local"] | NotGiven = not_given,
+ base_url: str | httpx.URL | None | NotGiven = not_given,
+ timeout: float | Timeout | None | NotGiven = not_given,
max_retries: int = DEFAULT_MAX_RETRIES,
default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
@@ -143,7 +142,6 @@ def __init__(
self.organizations = organizations.OrganizationsResource(self)
self.users = users.UsersResource(self)
self.projects = projects.ProjectsResource(self)
- self.tlm = tlm.TlmResource(self)
self.with_raw_response = CodexWithRawResponse(self)
self.with_streaming_response = CodexWithStreamedResponse(self)
@@ -216,9 +214,9 @@ def copy(
access_key: str | None = None,
environment: Literal["production", "staging", "local"] | None = None,
base_url: str | httpx.URL | None = None,
- timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | Timeout | None | NotGiven = not_given,
http_client: httpx.Client | None = None,
- max_retries: int | NotGiven = NOT_GIVEN,
+ max_retries: int | NotGiven = not_given,
default_headers: Mapping[str, str] | None = None,
set_default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
@@ -304,7 +302,6 @@ class AsyncCodex(AsyncAPIClient):
organizations: organizations.AsyncOrganizationsResource
users: users.AsyncUsersResource
projects: projects.AsyncProjectsResource
- tlm: tlm.AsyncTlmResource
with_raw_response: AsyncCodexWithRawResponse
with_streaming_response: AsyncCodexWithStreamedResponse
@@ -321,9 +318,9 @@ def __init__(
auth_token: str | None = None,
api_key: str | None = None,
access_key: str | None = None,
- environment: Literal["production", "staging", "local"] | NotGiven = NOT_GIVEN,
- base_url: str | httpx.URL | None | NotGiven = NOT_GIVEN,
- timeout: Union[float, Timeout, None, NotGiven] = NOT_GIVEN,
+ environment: Literal["production", "staging", "local"] | NotGiven = not_given,
+ base_url: str | httpx.URL | None | NotGiven = not_given,
+ timeout: float | Timeout | None | NotGiven = not_given,
max_retries: int = DEFAULT_MAX_RETRIES,
default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
@@ -389,7 +386,6 @@ def __init__(
self.organizations = organizations.AsyncOrganizationsResource(self)
self.users = users.AsyncUsersResource(self)
self.projects = projects.AsyncProjectsResource(self)
- self.tlm = tlm.AsyncTlmResource(self)
self.with_raw_response = AsyncCodexWithRawResponse(self)
self.with_streaming_response = AsyncCodexWithStreamedResponse(self)
@@ -462,9 +458,9 @@ def copy(
access_key: str | None = None,
environment: Literal["production", "staging", "local"] | None = None,
base_url: str | httpx.URL | None = None,
- timeout: float | Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | Timeout | None | NotGiven = not_given,
http_client: httpx.AsyncClient | None = None,
- max_retries: int | NotGiven = NOT_GIVEN,
+ max_retries: int | NotGiven = not_given,
default_headers: Mapping[str, str] | None = None,
set_default_headers: Mapping[str, str] | None = None,
default_query: Mapping[str, object] | None = None,
@@ -551,7 +547,6 @@ def __init__(self, client: Codex) -> None:
self.organizations = organizations.OrganizationsResourceWithRawResponse(client.organizations)
self.users = users.UsersResourceWithRawResponse(client.users)
self.projects = projects.ProjectsResourceWithRawResponse(client.projects)
- self.tlm = tlm.TlmResourceWithRawResponse(client.tlm)
class AsyncCodexWithRawResponse:
@@ -560,7 +555,6 @@ def __init__(self, client: AsyncCodex) -> None:
self.organizations = organizations.AsyncOrganizationsResourceWithRawResponse(client.organizations)
self.users = users.AsyncUsersResourceWithRawResponse(client.users)
self.projects = projects.AsyncProjectsResourceWithRawResponse(client.projects)
- self.tlm = tlm.AsyncTlmResourceWithRawResponse(client.tlm)
class CodexWithStreamedResponse:
@@ -569,7 +563,6 @@ def __init__(self, client: Codex) -> None:
self.organizations = organizations.OrganizationsResourceWithStreamingResponse(client.organizations)
self.users = users.UsersResourceWithStreamingResponse(client.users)
self.projects = projects.ProjectsResourceWithStreamingResponse(client.projects)
- self.tlm = tlm.TlmResourceWithStreamingResponse(client.tlm)
class AsyncCodexWithStreamedResponse:
@@ -578,7 +571,6 @@ def __init__(self, client: AsyncCodex) -> None:
self.organizations = organizations.AsyncOrganizationsResourceWithStreamingResponse(client.organizations)
self.users = users.AsyncUsersResourceWithStreamingResponse(client.users)
self.projects = projects.AsyncProjectsResourceWithStreamingResponse(client.projects)
- self.tlm = tlm.AsyncTlmResourceWithStreamingResponse(client.tlm)
Client = Codex
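For callers, client construction is unchanged by the sentinel swap: omitted settings fall through to the client defaults, while explicit values (including `timeout=None`) keep their meaning. A hedged usage sketch; the credential value is a placeholder:

```python
import httpx
from codex import Codex

# Omitted `environment`, `base_url`, and `timeout` default to `not_given`,
# so the client falls back to its built-in defaults.
client = Codex(api_key="my-api-key")  # placeholder credential

# Explicit values behave as before: an environment literal from this hunk's
# signature, and a concrete httpx.Timeout distinct from "not passed at all".
staging = Codex(
    api_key="my-api-key",
    environment="staging",
    timeout=httpx.Timeout(30.0),
)
```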
diff --git a/src/codex/_models.py b/src/codex/_models.py
index 3a6017ef..6a3cd1d2 100644
--- a/src/codex/_models.py
+++ b/src/codex/_models.py
@@ -256,7 +256,7 @@ def model_dump(
mode: Literal["json", "python"] | str = "python",
include: IncEx | None = None,
exclude: IncEx | None = None,
- by_alias: bool = False,
+ by_alias: bool | None = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
@@ -264,6 +264,7 @@ def model_dump(
warnings: bool | Literal["none", "warn", "error"] = True,
context: dict[str, Any] | None = None,
serialize_as_any: bool = False,
+ fallback: Callable[[Any], Any] | None = None,
) -> dict[str, Any]:
"""Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump
@@ -295,10 +296,12 @@ def model_dump(
raise ValueError("context is only supported in Pydantic v2")
if serialize_as_any != False:
raise ValueError("serialize_as_any is only supported in Pydantic v2")
+ if fallback is not None:
+ raise ValueError("fallback is only supported in Pydantic v2")
dumped = super().dict( # pyright: ignore[reportDeprecated]
include=include,
exclude=exclude,
- by_alias=by_alias,
+ by_alias=by_alias if by_alias is not None else False,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
@@ -313,13 +316,14 @@ def model_dump_json(
indent: int | None = None,
include: IncEx | None = None,
exclude: IncEx | None = None,
- by_alias: bool = False,
+ by_alias: bool | None = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
round_trip: bool = False,
warnings: bool | Literal["none", "warn", "error"] = True,
context: dict[str, Any] | None = None,
+ fallback: Callable[[Any], Any] | None = None,
serialize_as_any: bool = False,
) -> str:
"""Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump_json
@@ -348,11 +352,13 @@ def model_dump_json(
raise ValueError("context is only supported in Pydantic v2")
if serialize_as_any != False:
raise ValueError("serialize_as_any is only supported in Pydantic v2")
+ if fallback is not None:
+ raise ValueError("fallback is only supported in Pydantic v2")
return super().json( # type: ignore[reportDeprecated]
indent=indent,
include=include,
exclude=exclude,
- by_alias=by_alias,
+ by_alias=by_alias if by_alias is not None else False,
exclude_unset=exclude_unset,
exclude_defaults=exclude_defaults,
exclude_none=exclude_none,
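Two details in this hunk are easy to miss: `by_alias` now defaults to `None` so the wrapper only coerces it to `False` on the Pydantic v1 fallback path, and the new `fallback` serializer hook (a Pydantic 2.11+ feature, matching the lockfile bump) is rejected outright under v1, where `.dict()`/`.json()` have no equivalent. A minimal sketch of the same guard pattern, using `pydantic.VERSION` as a stand-in for the `PYDANTIC_V1` flag imported from `._compat`:

```python
from typing import Any, Callable

import pydantic

PYDANTIC_V1 = pydantic.VERSION.startswith("1.")  # stand-in for codex._compat.PYDANTIC_V1

def model_dump_compat(
    model: pydantic.BaseModel,
    *,
    by_alias: bool | None = None,
    fallback: Callable[[Any], Any] | None = None,
) -> dict[str, Any]:
    if PYDANTIC_V1:
        if fallback is not None:
            # `fallback` has no v1 equivalent, so reject it loudly.
            raise ValueError("fallback is only supported in Pydantic v2")
        # v1's .dict() takes a plain bool, so None collapses to False here...
        return model.dict(by_alias=by_alias if by_alias is not None else False)
    # ...while on v2 (2.11+), None passes straight through and Pydantic
    # applies its own default for by_alias.
    return model.model_dump(by_alias=by_alias, fallback=fallback)
```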
diff --git a/src/codex/_qs.py b/src/codex/_qs.py
index 274320ca..ada6fd3f 100644
--- a/src/codex/_qs.py
+++ b/src/codex/_qs.py
@@ -4,7 +4,7 @@
from urllib.parse import parse_qs, urlencode
from typing_extensions import Literal, get_args
-from ._types import NOT_GIVEN, NotGiven, NotGivenOr
+from ._types import NotGiven, not_given
from ._utils import flatten
_T = TypeVar("_T")
@@ -41,8 +41,8 @@ def stringify(
self,
params: Params,
*,
- array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN,
- nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN,
+ array_format: ArrayFormat | NotGiven = not_given,
+ nested_format: NestedFormat | NotGiven = not_given,
) -> str:
return urlencode(
self.stringify_items(
@@ -56,8 +56,8 @@ def stringify_items(
self,
params: Params,
*,
- array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN,
- nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN,
+ array_format: ArrayFormat | NotGiven = not_given,
+ nested_format: NestedFormat | NotGiven = not_given,
) -> list[tuple[str, str]]:
opts = Options(
qs=self,
@@ -143,8 +143,8 @@ def __init__(
self,
qs: Querystring = _qs,
*,
- array_format: NotGivenOr[ArrayFormat] = NOT_GIVEN,
- nested_format: NotGivenOr[NestedFormat] = NOT_GIVEN,
+ array_format: ArrayFormat | NotGiven = not_given,
+ nested_format: NestedFormat | NotGiven = not_given,
) -> None:
self.array_format = qs.array_format if isinstance(array_format, NotGiven) else array_format
self.nested_format = qs.nested_format if isinstance(nested_format, NotGiven) else nested_format
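The `NotGivenOr[T]` alias is retired in favor of the explicit `T | NotGiven` union, and `Options` still falls back to the parent `Querystring`'s formats when nothing was supplied, via the `isinstance(..., NotGiven)` checks above. A small usage sketch, assuming `"comma"` is one of the `ArrayFormat` literals defined in this module:

```python
from codex._qs import Querystring

qs = Querystring()

# With array_format omitted it stays `not_given`, so Options falls back to
# this Querystring's own default; passing a literal overrides it per call.
default_style = qs.stringify({"tags": ["a", "b"]})
comma_style = qs.stringify({"tags": ["a", "b"]}, array_format="comma")
print(default_style, comma_style, sep="\n")
```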
diff --git a/src/codex/_types.py b/src/codex/_types.py
index 99d66e20..2e4695f9 100644
--- a/src/codex/_types.py
+++ b/src/codex/_types.py
@@ -117,18 +117,21 @@ class RequestOptions(TypedDict, total=False):
# Sentinel class used until PEP 0661 is accepted
class NotGiven:
"""
- A sentinel singleton class used to distinguish omitted keyword arguments
- from those passed in with the value None (which may have different behavior).
+ For parameters with a meaningful None value, we need to distinguish between
+ the user explicitly passing None, and the user not passing the parameter at
+ all.
+
+ User code shouldn't need to use not_given directly.
For example:
```py
- def get(timeout: Union[int, NotGiven, None] = NotGiven()) -> Response: ...
+ def create(timeout: Timeout | None | NotGiven = not_given): ...
- get(timeout=1) # 1s timeout
- get(timeout=None) # No timeout
- get() # Default timeout behavior, which may not be statically known at the method definition.
+ create(timeout=1) # 1s timeout
+ create(timeout=None) # No timeout
+ create() # Default timeout behavior
```
"""
@@ -140,13 +143,14 @@ def __repr__(self) -> str:
return "NOT_GIVEN"
-NotGivenOr = Union[_T, NotGiven]
+not_given = NotGiven()
+# for backwards compatibility:
NOT_GIVEN = NotGiven()
class Omit:
- """In certain situations you need to be able to represent a case where a default value has
- to be explicitly removed and `None` is not an appropriate substitute, for example:
+ """
+ To explicitly omit something from being sent in a request, use `omit`.
```py
# as the default `Content-Type` header is `application/json` that will be sent
@@ -156,8 +160,8 @@ class Omit:
# to look something like: 'multipart/form-data; boundary=0d8382fcf5f8c3be01ca2e11002d2983'
client.post(..., headers={"Content-Type": "multipart/form-data"})
- # instead you can remove the default `application/json` header by passing Omit
- client.post(..., headers={"Content-Type": Omit()})
+ # instead you can remove the default `application/json` header by passing omit
+ client.post(..., headers={"Content-Type": omit})
```
"""
@@ -165,6 +169,9 @@ def __bool__(self) -> Literal[False]:
return False
+omit = Omit()
+
+
@runtime_checkable
class ModelBuilderProtocol(Protocol):
@classmethod
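The two sentinels now have clearly divided jobs: `not_given` marks an argument the caller never supplied (so the SDK applies its default), while `omit` actively strips a value, typically a default header, from the outgoing request. A short sketch grounded in this hunk (`Omit.__bool__` and `NotGiven.__repr__` are shown above):

```python
from codex._types import NotGiven, Omit, not_given, omit

# Both sentinels are falsy singletons, so "was anything real passed?" checks
# can treat them uniformly.
assert bool(omit) is False
assert repr(not_given) == "NOT_GIVEN"
assert isinstance(not_given, NotGiven) and isinstance(omit, Omit)

# not_given: the caller left the argument off; the SDK substitutes a default.
# omit: actively remove a value that would otherwise be sent, e.g. a header:
#     client.post(..., headers={"Content-Type": omit})
```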
diff --git a/src/codex/_utils/_transform.py b/src/codex/_utils/_transform.py
index c19124f0..52075492 100644
--- a/src/codex/_utils/_transform.py
+++ b/src/codex/_utils/_transform.py
@@ -268,7 +268,7 @@ def _transform_typeddict(
annotations = get_type_hints(expected_type, include_extras=True)
for key, value in data.items():
if not is_given(value):
- # we don't need to include `NotGiven` values here as they'll
+ # we don't need to include omitted values here as they'll
# be stripped out before the request is sent anyway
continue
@@ -434,7 +434,7 @@ async def _async_transform_typeddict(
annotations = get_type_hints(expected_type, include_extras=True)
for key, value in data.items():
if not is_given(value):
- # we don't need to include `NotGiven` values here as they'll
+ # we don't need to include omitted values here as they'll
# be stripped out before the request is sent anyway
continue
diff --git a/src/codex/_utils/_utils.py b/src/codex/_utils/_utils.py
index f0818595..50d59269 100644
--- a/src/codex/_utils/_utils.py
+++ b/src/codex/_utils/_utils.py
@@ -21,7 +21,7 @@
import sniffio
-from .._types import NotGiven, FileTypes, NotGivenOr, HeadersLike
+from .._types import Omit, NotGiven, FileTypes, HeadersLike
_T = TypeVar("_T")
_TupleT = TypeVar("_TupleT", bound=Tuple[object, ...])
@@ -63,7 +63,7 @@ def _extract_items(
try:
key = path[index]
except IndexError:
- if isinstance(obj, NotGiven):
+ if not is_given(obj):
# no value was provided - we can safely ignore
return []
@@ -126,8 +126,8 @@ def _extract_items(
return []
-def is_given(obj: NotGivenOr[_T]) -> TypeGuard[_T]:
- return not isinstance(obj, NotGiven)
+def is_given(obj: _T | NotGiven | Omit) -> TypeGuard[_T]:
+ return not isinstance(obj, NotGiven) and not isinstance(obj, Omit)
# Type safe methods for narrowing types with TypeVars.
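`is_given` now rejects both sentinel families, which is what lets call sites treat `T | NotGiven` and `T | Omit` parameters uniformly; as a `TypeGuard` it also narrows the type for checkers. A small sketch:

```python
from codex._types import NotGiven, not_given, omit
from codex._utils import is_given

def describe(timeout: float | None | NotGiven = not_given) -> str:
    if is_given(timeout):
        # Narrowed to `float | None` here for type checkers.
        return "no timeout" if timeout is None else f"{timeout}s timeout"
    return "default timeout"

assert describe() == "default timeout"
assert describe(timeout=None) == "no timeout"
assert describe(timeout=2.0) == "2.0s timeout"
assert not is_given(omit)  # Omit values are treated as absent too
```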
diff --git a/src/codex/_version.py b/src/codex/_version.py
index 1e7d5f15..0e64acc3 100644
--- a/src/codex/_version.py
+++ b/src/codex/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "codex"
-__version__ = "0.1.0-alpha.26" # x-release-please-version
+__version__ = "0.1.0-alpha.27" # x-release-please-version
diff --git a/src/codex/resources/__init__.py b/src/codex/resources/__init__.py
index f91f0e43..b96b725a 100644
--- a/src/codex/resources/__init__.py
+++ b/src/codex/resources/__init__.py
@@ -1,13 +1,5 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from .tlm import (
- TlmResource,
- AsyncTlmResource,
- TlmResourceWithRawResponse,
- AsyncTlmResourceWithRawResponse,
- TlmResourceWithStreamingResponse,
- AsyncTlmResourceWithStreamingResponse,
-)
from .users import (
UsersResource,
AsyncUsersResource,
@@ -66,10 +58,4 @@
"AsyncProjectsResourceWithRawResponse",
"ProjectsResourceWithStreamingResponse",
"AsyncProjectsResourceWithStreamingResponse",
- "TlmResource",
- "AsyncTlmResource",
- "TlmResourceWithRawResponse",
- "AsyncTlmResourceWithRawResponse",
- "TlmResourceWithStreamingResponse",
- "AsyncTlmResourceWithStreamingResponse",
]
diff --git a/src/codex/resources/health.py b/src/codex/resources/health.py
index d74d23a5..d82d5cbb 100644
--- a/src/codex/resources/health.py
+++ b/src/codex/resources/health.py
@@ -4,7 +4,7 @@
import httpx
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._types import Body, Query, Headers, NotGiven, not_given
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
@@ -47,7 +47,7 @@ def check(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> HealthCheckResponse:
"""Check the health of the application."""
return self._get(
@@ -66,7 +66,7 @@ def db(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> HealthCheckResponse:
"""Check the database connection."""
return self._get(
@@ -106,7 +106,7 @@ async def check(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> HealthCheckResponse:
"""Check the health of the application."""
return await self._get(
@@ -125,7 +125,7 @@ async def db(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> HealthCheckResponse:
"""Check the database connection."""
return await self._get(
diff --git a/src/codex/resources/organizations/billing/billing.py b/src/codex/resources/organizations/billing/billing.py
index 79c2a5c9..3a9eca12 100644
--- a/src/codex/resources/organizations/billing/billing.py
+++ b/src/codex/resources/organizations/billing/billing.py
@@ -4,7 +4,7 @@
import httpx
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._types import Body, Query, Headers, NotGiven, not_given
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import (
@@ -85,7 +85,7 @@ def invoices(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> OrganizationBillingInvoicesSchema:
"""
Get invoices iFrame URL for an organization.
@@ -118,7 +118,7 @@ def usage(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> OrganizationBillingUsageSchema:
"""
Get usage iFrame URL for an organization.
@@ -184,7 +184,7 @@ async def invoices(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> OrganizationBillingInvoicesSchema:
"""
Get invoices iFrame URL for an organization.
@@ -217,7 +217,7 @@ async def usage(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> OrganizationBillingUsageSchema:
"""
Get usage iFrame URL for an organization.
diff --git a/src/codex/resources/organizations/billing/card_details.py b/src/codex/resources/organizations/billing/card_details.py
index 94cb8a70..2a3f6704 100644
--- a/src/codex/resources/organizations/billing/card_details.py
+++ b/src/codex/resources/organizations/billing/card_details.py
@@ -6,7 +6,7 @@
import httpx
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._types import Body, Query, Headers, NotGiven, not_given
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import (
@@ -50,7 +50,7 @@ def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Optional[OrganizationBillingCardDetails]:
"""
Get card details for an organization.
@@ -104,7 +104,7 @@ async def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> Optional[OrganizationBillingCardDetails]:
"""
Get card details for an organization.
diff --git a/src/codex/resources/organizations/billing/plan_details.py b/src/codex/resources/organizations/billing/plan_details.py
index 6ff726e2..029c02f2 100644
--- a/src/codex/resources/organizations/billing/plan_details.py
+++ b/src/codex/resources/organizations/billing/plan_details.py
@@ -4,7 +4,7 @@
import httpx
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._types import Body, Query, Headers, NotGiven, not_given
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import (
@@ -48,7 +48,7 @@ def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> OrganizationBillingPlanDetails:
"""
Get plan details for an organization.
@@ -104,7 +104,7 @@ async def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> OrganizationBillingPlanDetails:
"""
Get plan details for an organization.
diff --git a/src/codex/resources/organizations/billing/setup_intent.py b/src/codex/resources/organizations/billing/setup_intent.py
index ba915c6a..4efa57d0 100644
--- a/src/codex/resources/organizations/billing/setup_intent.py
+++ b/src/codex/resources/organizations/billing/setup_intent.py
@@ -4,7 +4,7 @@
import httpx
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._types import Body, Query, Headers, NotGiven, not_given
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import (
@@ -48,7 +48,7 @@ def create(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> OrganizationBillingSetupIntent:
"""
Create a setup intent for an organization.
@@ -102,7 +102,7 @@ async def create(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> OrganizationBillingSetupIntent:
"""
Create a setup intent for an organization.
diff --git a/src/codex/resources/organizations/organizations.py b/src/codex/resources/organizations/organizations.py
index f1eb4d5e..32b60fea 100644
--- a/src/codex/resources/organizations/organizations.py
+++ b/src/codex/resources/organizations/organizations.py
@@ -4,7 +4,7 @@
import httpx
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._types import Body, Query, Headers, NotGiven, not_given
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -62,7 +62,7 @@ def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> OrganizationSchemaPublic:
"""
Get a single organization.
@@ -95,7 +95,7 @@ def list_members(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> OrganizationListMembersResponse:
"""
Get a list of organization members with their names and emails.
@@ -128,7 +128,7 @@ def retrieve_permissions(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> OrganizationRetrievePermissionsResponse:
"""
Get the user's permissions for this organization.
@@ -186,7 +186,7 @@ async def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> OrganizationSchemaPublic:
"""
Get a single organization.
@@ -219,7 +219,7 @@ async def list_members(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> OrganizationListMembersResponse:
"""
Get a list of organization members with their names and emails.
@@ -252,7 +252,7 @@ async def retrieve_permissions(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> OrganizationRetrievePermissionsResponse:
"""
Get the user's permissions for this organization.
diff --git a/src/codex/resources/projects/access_keys.py b/src/codex/resources/projects/access_keys.py
index 15190030..d18cef0d 100644
--- a/src/codex/resources/projects/access_keys.py
+++ b/src/codex/resources/projects/access_keys.py
@@ -7,7 +7,7 @@
import httpx
-from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
+from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
from ..._utils import maybe_transform, strip_not_given, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
@@ -51,18 +51,18 @@ def create(
project_id: str,
*,
name: str,
- description: Optional[str] | NotGiven = NOT_GIVEN,
- expires_at: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- x_client_library_version: str | NotGiven = NOT_GIVEN,
- x_integration_type: str | NotGiven = NOT_GIVEN,
- x_source: str | NotGiven = NOT_GIVEN,
- x_stainless_package_version: str | NotGiven = NOT_GIVEN,
+ description: Optional[str] | Omit = omit,
+ expires_at: Union[str, datetime, None] | Omit = omit,
+ x_client_library_version: str | Omit = omit,
+ x_integration_type: str | Omit = omit,
+ x_source: str | Omit = omit,
+ x_stainless_package_version: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AccessKeySchema:
"""
Create a new access key.
@@ -115,7 +115,7 @@ def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AccessKeySchema:
"""
Get a single access key.
@@ -147,14 +147,14 @@ def update(
*,
project_id: str,
name: str,
- description: Optional[str] | NotGiven = NOT_GIVEN,
- expires_at: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
+ description: Optional[str] | Omit = omit,
+ expires_at: Union[str, datetime, None] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AccessKeySchema:
"""
Update an existing access key.
@@ -197,7 +197,7 @@ def list(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AccessKeyListResponse:
"""
List all access keys for a project.
@@ -231,7 +231,7 @@ def delete(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
"""
Delete an existing access key.
@@ -266,7 +266,7 @@ def retrieve_project_id(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AccessKeyRetrieveProjectIDResponse:
"""Get the project ID from an access key."""
return self._get(
@@ -287,7 +287,7 @@ def revoke(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
"""
Revoke an access key.
@@ -340,18 +340,18 @@ async def create(
project_id: str,
*,
name: str,
- description: Optional[str] | NotGiven = NOT_GIVEN,
- expires_at: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- x_client_library_version: str | NotGiven = NOT_GIVEN,
- x_integration_type: str | NotGiven = NOT_GIVEN,
- x_source: str | NotGiven = NOT_GIVEN,
- x_stainless_package_version: str | NotGiven = NOT_GIVEN,
+ description: Optional[str] | Omit = omit,
+ expires_at: Union[str, datetime, None] | Omit = omit,
+ x_client_library_version: str | Omit = omit,
+ x_integration_type: str | Omit = omit,
+ x_source: str | Omit = omit,
+ x_stainless_package_version: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AccessKeySchema:
"""
Create a new access key.
@@ -404,7 +404,7 @@ async def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AccessKeySchema:
"""
Get a single access key.
@@ -436,14 +436,14 @@ async def update(
*,
project_id: str,
name: str,
- description: Optional[str] | NotGiven = NOT_GIVEN,
- expires_at: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
+ description: Optional[str] | Omit = omit,
+ expires_at: Union[str, datetime, None] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AccessKeySchema:
"""
Update an existing access key.
@@ -486,7 +486,7 @@ async def list(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AccessKeyListResponse:
"""
List all access keys for a project.
@@ -520,7 +520,7 @@ async def delete(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
"""
Delete an existing access key.
@@ -555,7 +555,7 @@ async def retrieve_project_id(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AccessKeyRetrieveProjectIDResponse:
"""Get the project ID from an access key."""
return await self._get(
@@ -576,7 +576,7 @@ async def revoke(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
"""
Revoke an access key.
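At call sites nothing changes: optional request fields are still just left off, and the `omit` default keeps them out of the JSON body entirely, whereas an explicit `None` (for `Optional[...] | Omit` fields like `description`) is serialized as `null`. A hedged usage sketch; the IDs are placeholders and the positional `access_key_id` on `update` follows the pattern of the other methods in this file:

```python
from codex import Codex

client = Codex(api_key="my-api-key")  # placeholder credential

# `description` and `expires_at` default to `omit`, so neither field is
# serialized into the request body here:
client.projects.access_keys.create(
    "proj_123",  # placeholder project_id
    name="ci-key",
)

# An explicit None is different: Optional[str] | Omit means "null" is a real
# value, so this request does send `"description": null`.
client.projects.access_keys.update(
    "ak_456",  # placeholder access_key_id
    project_id="proj_123",
    name="ci-key",
    description=None,
)
```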
diff --git a/src/codex/resources/projects/evals.py b/src/codex/resources/projects/evals.py
index 9de41b79..54802918 100644
--- a/src/codex/resources/projects/evals.py
+++ b/src/codex/resources/projects/evals.py
@@ -7,7 +7,7 @@
import httpx
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ..._utils import required_args, maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
@@ -52,22 +52,22 @@ def create(
criteria: str,
eval_key: str,
name: str,
- context_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- enabled: bool | NotGiven = NOT_GIVEN,
- is_default: bool | NotGiven = NOT_GIVEN,
- priority: Optional[int] | NotGiven = NOT_GIVEN,
- query_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- response_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- should_escalate: bool | NotGiven = NOT_GIVEN,
- should_guardrail: bool | NotGiven = NOT_GIVEN,
- threshold: float | NotGiven = NOT_GIVEN,
- threshold_direction: Literal["above", "below"] | NotGiven = NOT_GIVEN,
+ context_identifier: Optional[str] | Omit = omit,
+ enabled: bool | Omit = omit,
+ is_default: bool | Omit = omit,
+ priority: Optional[int] | Omit = omit,
+ query_identifier: Optional[str] | Omit = omit,
+ response_identifier: Optional[str] | Omit = omit,
+ should_escalate: bool | Omit = omit,
+ should_guardrail: bool | Omit = omit,
+ threshold: float | Omit = omit,
+ threshold_direction: Literal["above", "below"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectReturnSchema:
"""
Create a new custom eval for a project.
@@ -150,22 +150,22 @@ def update(
criteria: str,
body_eval_key: str,
name: str,
- context_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- enabled: bool | NotGiven = NOT_GIVEN,
- is_default: bool | NotGiven = NOT_GIVEN,
- priority: Optional[int] | NotGiven = NOT_GIVEN,
- query_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- response_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- should_escalate: bool | NotGiven = NOT_GIVEN,
- should_guardrail: bool | NotGiven = NOT_GIVEN,
- threshold: float | NotGiven = NOT_GIVEN,
- threshold_direction: Literal["above", "below"] | NotGiven = NOT_GIVEN,
+ context_identifier: Optional[str] | Omit = omit,
+ enabled: bool | Omit = omit,
+ is_default: bool | Omit = omit,
+ priority: Optional[int] | Omit = omit,
+ query_identifier: Optional[str] | Omit = omit,
+ response_identifier: Optional[str] | Omit = omit,
+ should_escalate: bool | Omit = omit,
+ should_guardrail: bool | Omit = omit,
+ threshold: float | Omit = omit,
+ threshold_direction: Literal["above", "below"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectReturnSchema:
"""
Update an existing eval for a project.
@@ -220,18 +220,18 @@ def update(
*,
project_id: str,
body_eval_key: str,
- enabled: bool | NotGiven = NOT_GIVEN,
- priority: Optional[int] | NotGiven = NOT_GIVEN,
- should_escalate: bool | NotGiven = NOT_GIVEN,
- should_guardrail: bool | NotGiven = NOT_GIVEN,
- threshold: float | NotGiven = NOT_GIVEN,
- threshold_direction: Literal["above", "below"] | NotGiven = NOT_GIVEN,
+ enabled: bool | Omit = omit,
+ priority: Optional[int] | Omit = omit,
+ should_escalate: bool | Omit = omit,
+ should_guardrail: bool | Omit = omit,
+ threshold: float | Omit = omit,
+ threshold_direction: Literal["above", "below"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectReturnSchema:
"""
Update an existing eval for a project.
@@ -270,25 +270,25 @@ def update(
path_eval_key: str,
*,
project_id: str,
- criteria: str | NotGiven = NOT_GIVEN,
+ criteria: str | Omit = omit,
body_eval_key: str,
- name: str | NotGiven = NOT_GIVEN,
- context_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- enabled: bool | NotGiven = NOT_GIVEN,
- is_default: bool | NotGiven = NOT_GIVEN,
- priority: Optional[int] | NotGiven = NOT_GIVEN,
- query_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- response_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- should_escalate: bool | NotGiven = NOT_GIVEN,
- should_guardrail: bool | NotGiven = NOT_GIVEN,
- threshold: float | NotGiven = NOT_GIVEN,
- threshold_direction: Literal["above", "below"] | NotGiven = NOT_GIVEN,
+ name: str | Omit = omit,
+ context_identifier: Optional[str] | Omit = omit,
+ enabled: bool | Omit = omit,
+ is_default: bool | Omit = omit,
+ priority: Optional[int] | Omit = omit,
+ query_identifier: Optional[str] | Omit = omit,
+ response_identifier: Optional[str] | Omit = omit,
+ should_escalate: bool | Omit = omit,
+ should_guardrail: bool | Omit = omit,
+ threshold: float | Omit = omit,
+ threshold_direction: Literal["above", "below"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectReturnSchema:
if not project_id:
raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
@@ -324,15 +324,15 @@ def list(
self,
project_id: str,
*,
- guardrails_only: bool | NotGiven = NOT_GIVEN,
- limit: Optional[int] | NotGiven = NOT_GIVEN,
- offset: int | NotGiven = NOT_GIVEN,
+ guardrails_only: bool | Omit = omit,
+ limit: Optional[int] | Omit = omit,
+ offset: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> EvalListResponse:
"""
Get the evaluations config for a project with optional pagination.
@@ -377,7 +377,7 @@ def delete(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectReturnSchema:
"""
Remove a custom eval for a project.
@@ -431,22 +431,22 @@ async def create(
criteria: str,
eval_key: str,
name: str,
- context_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- enabled: bool | NotGiven = NOT_GIVEN,
- is_default: bool | NotGiven = NOT_GIVEN,
- priority: Optional[int] | NotGiven = NOT_GIVEN,
- query_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- response_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- should_escalate: bool | NotGiven = NOT_GIVEN,
- should_guardrail: bool | NotGiven = NOT_GIVEN,
- threshold: float | NotGiven = NOT_GIVEN,
- threshold_direction: Literal["above", "below"] | NotGiven = NOT_GIVEN,
+ context_identifier: Optional[str] | Omit = omit,
+ enabled: bool | Omit = omit,
+ is_default: bool | Omit = omit,
+ priority: Optional[int] | Omit = omit,
+ query_identifier: Optional[str] | Omit = omit,
+ response_identifier: Optional[str] | Omit = omit,
+ should_escalate: bool | Omit = omit,
+ should_guardrail: bool | Omit = omit,
+ threshold: float | Omit = omit,
+ threshold_direction: Literal["above", "below"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectReturnSchema:
"""
Create a new custom eval for a project.
@@ -529,22 +529,22 @@ async def update(
criteria: str,
body_eval_key: str,
name: str,
- context_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- enabled: bool | NotGiven = NOT_GIVEN,
- is_default: bool | NotGiven = NOT_GIVEN,
- priority: Optional[int] | NotGiven = NOT_GIVEN,
- query_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- response_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- should_escalate: bool | NotGiven = NOT_GIVEN,
- should_guardrail: bool | NotGiven = NOT_GIVEN,
- threshold: float | NotGiven = NOT_GIVEN,
- threshold_direction: Literal["above", "below"] | NotGiven = NOT_GIVEN,
+ context_identifier: Optional[str] | Omit = omit,
+ enabled: bool | Omit = omit,
+ is_default: bool | Omit = omit,
+ priority: Optional[int] | Omit = omit,
+ query_identifier: Optional[str] | Omit = omit,
+ response_identifier: Optional[str] | Omit = omit,
+ should_escalate: bool | Omit = omit,
+ should_guardrail: bool | Omit = omit,
+ threshold: float | Omit = omit,
+ threshold_direction: Literal["above", "below"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectReturnSchema:
"""
Update an existing eval for a project.
@@ -599,18 +599,18 @@ async def update(
*,
project_id: str,
body_eval_key: str,
- enabled: bool | NotGiven = NOT_GIVEN,
- priority: Optional[int] | NotGiven = NOT_GIVEN,
- should_escalate: bool | NotGiven = NOT_GIVEN,
- should_guardrail: bool | NotGiven = NOT_GIVEN,
- threshold: float | NotGiven = NOT_GIVEN,
- threshold_direction: Literal["above", "below"] | NotGiven = NOT_GIVEN,
+ enabled: bool | Omit = omit,
+ priority: Optional[int] | Omit = omit,
+ should_escalate: bool | Omit = omit,
+ should_guardrail: bool | Omit = omit,
+ threshold: float | Omit = omit,
+ threshold_direction: Literal["above", "below"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectReturnSchema:
"""
Update an existing eval for a project.
@@ -649,25 +649,25 @@ async def update(
path_eval_key: str,
*,
project_id: str,
- criteria: str | NotGiven = NOT_GIVEN,
+ criteria: str | Omit = omit,
body_eval_key: str,
- name: str | NotGiven = NOT_GIVEN,
- context_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- enabled: bool | NotGiven = NOT_GIVEN,
- is_default: bool | NotGiven = NOT_GIVEN,
- priority: Optional[int] | NotGiven = NOT_GIVEN,
- query_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- response_identifier: Optional[str] | NotGiven = NOT_GIVEN,
- should_escalate: bool | NotGiven = NOT_GIVEN,
- should_guardrail: bool | NotGiven = NOT_GIVEN,
- threshold: float | NotGiven = NOT_GIVEN,
- threshold_direction: Literal["above", "below"] | NotGiven = NOT_GIVEN,
+ name: str | Omit = omit,
+ context_identifier: Optional[str] | Omit = omit,
+ enabled: bool | Omit = omit,
+ is_default: bool | Omit = omit,
+ priority: Optional[int] | Omit = omit,
+ query_identifier: Optional[str] | Omit = omit,
+ response_identifier: Optional[str] | Omit = omit,
+ should_escalate: bool | Omit = omit,
+ should_guardrail: bool | Omit = omit,
+ threshold: float | Omit = omit,
+ threshold_direction: Literal["above", "below"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectReturnSchema:
if not project_id:
raise ValueError(f"Expected a non-empty value for `project_id` but received {project_id!r}")
@@ -703,15 +703,15 @@ async def list(
self,
project_id: str,
*,
- guardrails_only: bool | NotGiven = NOT_GIVEN,
- limit: Optional[int] | NotGiven = NOT_GIVEN,
- offset: int | NotGiven = NOT_GIVEN,
+ guardrails_only: bool | Omit = omit,
+ limit: Optional[int] | Omit = omit,
+ offset: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> EvalListResponse:
"""
Get the evaluations config for a project with optional pagination.
@@ -756,7 +756,7 @@ async def delete(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectReturnSchema:
"""
Remove a custom eval for a project.
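The same pattern covers query parameters: the pagination and filter options on `list` default to `omit` and only appear on the query string when passed. A brief sketch with a placeholder project ID:

```python
from codex import Codex

client = Codex(api_key="my-api-key")  # placeholder credential

# Only `limit` is passed; `guardrails_only` and `offset` stay `omit` and
# never reach the query string.
evals = client.projects.evals.list("proj_123", limit=10)
```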
diff --git a/src/codex/resources/projects/projects.py b/src/codex/resources/projects/projects.py
index f0ef92ab..2ed4dd2c 100644
--- a/src/codex/resources/projects/projects.py
+++ b/src/codex/resources/projects/projects.py
@@ -23,7 +23,7 @@
project_invite_sme_params,
project_retrieve_analytics_params,
)
-from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven, SequenceNotStr
+from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
from ..._utils import maybe_transform, strip_not_given, async_maybe_transform
from ..._compat import cached_property
from .query_logs import (
@@ -110,14 +110,14 @@ def create(
config: project_create_params.Config,
name: str,
organization_id: str,
- auto_clustering_enabled: bool | NotGiven = NOT_GIVEN,
- description: Optional[str] | NotGiven = NOT_GIVEN,
+ auto_clustering_enabled: bool | Omit = omit,
+ description: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectReturnSchema:
"""
Create a new project.
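The `NotGiven`/`NOT_GIVEN` → `Omit`/`omit` change above only renames the sentinel used for omitted optional parameters in type annotations; callers still express omission by simply not passing the argument. A minimal sketch, assuming the client class is exported as `Codex` and using placeholder IDs and config:

```python
from codex import Codex

client = Codex()  # assumes credentials are picked up from the environment

project = client.projects.create(
    config={},                  # placeholder; real Config fields omitted here
    name="My project",
    organization_id="org_123",  # hypothetical organization ID
    # `description` and `auto_clustering_enabled` default to `omit`, so they
    # are excluded from the request body entirely (unlike an explicit None).
)
```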
@@ -158,7 +158,7 @@ def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectRetrieveResponse:
"""
Get a single project.
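The recurring `extra_headers`/`extra_query`/`extra_body`/`timeout` comment block describes per-request overrides that take precedence over client-level settings. A hedged example against `retrieve` (header name and IDs are illustrative):

```python
import httpx

project = client.projects.retrieve(
    "proj_123",                                  # hypothetical project ID
    extra_headers={"X-Request-Id": "debug-42"},  # merged over client headers
    timeout=httpx.Timeout(30.0),                 # overrides the client default
)
```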
@@ -186,16 +186,16 @@ def update(
self,
project_id: str,
*,
- auto_clustering_enabled: Optional[bool] | NotGiven = NOT_GIVEN,
- config: Optional[project_update_params.Config] | NotGiven = NOT_GIVEN,
- description: Optional[str] | NotGiven = NOT_GIVEN,
- name: Optional[str] | NotGiven = NOT_GIVEN,
+ auto_clustering_enabled: Optional[bool] | Omit = omit,
+ config: Optional[project_update_params.Config] | Omit = omit,
+ description: Optional[str] | Omit = omit,
+ name: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectReturnSchema:
"""
Update a project.
@@ -231,19 +231,19 @@ def update(
def list(
self,
*,
- include_unaddressed_counts: bool | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- offset: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- organization_id: str | NotGiven = NOT_GIVEN,
- query: Optional[str] | NotGiven = NOT_GIVEN,
- sort: Literal["created_at", "updated_at"] | NotGiven = NOT_GIVEN,
+ include_unaddressed_counts: bool | Omit = omit,
+ limit: int | Omit = omit,
+ offset: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ organization_id: str | Omit = omit,
+ query: Optional[str] | Omit = omit,
+ sort: Literal["created_at", "updated_at"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectListResponse:
"""
List projects for organization.
@@ -289,7 +289,7 @@ def delete(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
"""
Delete a project.
@@ -323,7 +323,7 @@ def export(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> object:
"""
Export all data for a project as a JSON file.
@@ -359,7 +359,7 @@ def invite_sme(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectInviteSmeResponse:
"""
Invite a subject matter expert to view a specific query log or remediation.
@@ -397,14 +397,14 @@ def retrieve_analytics(
self,
project_id: str,
*,
- end: int | NotGiven = NOT_GIVEN,
- start: int | NotGiven = NOT_GIVEN,
+ end: int | Omit = omit,
+ start: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectRetrieveAnalyticsResponse:
"""
Get Project Analytics Route
@@ -449,28 +449,27 @@ def validate(
context: str,
query: str,
response: project_validate_params.Response,
- use_llm_matching: Optional[bool] | NotGiven = NOT_GIVEN,
- constrain_outputs: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- custom_eval_thresholds: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN,
- custom_metadata: Optional[object] | NotGiven = NOT_GIVEN,
- eval_scores: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN,
- messages: Iterable[project_validate_params.Message] | NotGiven = NOT_GIVEN,
- options: Optional[project_validate_params.Options] | NotGiven = NOT_GIVEN,
- prompt: Optional[str] | NotGiven = NOT_GIVEN,
- quality_preset: Literal["best", "high", "medium", "low", "base"] | NotGiven = NOT_GIVEN,
- rewritten_question: Optional[str] | NotGiven = NOT_GIVEN,
- task: Optional[str] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[project_validate_params.Tool]] | NotGiven = NOT_GIVEN,
- x_client_library_version: str | NotGiven = NOT_GIVEN,
- x_integration_type: str | NotGiven = NOT_GIVEN,
- x_source: str | NotGiven = NOT_GIVEN,
- x_stainless_package_version: str | NotGiven = NOT_GIVEN,
+ use_llm_matching: Optional[bool] | Omit = omit,
+ constrain_outputs: Optional[SequenceNotStr[str]] | Omit = omit,
+ custom_eval_thresholds: Optional[Dict[str, float]] | Omit = omit,
+ custom_metadata: Optional[object] | Omit = omit,
+ eval_scores: Optional[Dict[str, float]] | Omit = omit,
+ messages: Iterable[project_validate_params.Message] | Omit = omit,
+ options: Optional[project_validate_params.Options] | Omit = omit,
+ quality_preset: Literal["best", "high", "medium", "low", "base"] | Omit = omit,
+ rewritten_question: Optional[str] | Omit = omit,
+ task: Optional[str] | Omit = omit,
+ tools: Optional[Iterable[project_validate_params.Tool]] | Omit = omit,
+ x_client_library_version: str | Omit = omit,
+ x_integration_type: str | Omit = omit,
+ x_source: str | Omit = omit,
+ x_stainless_package_version: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectValidateResponse:
"""
Evaluate whether a response, given the provided query and context, is
@@ -576,12 +575,8 @@ def validate(
When this parameter is 1, `TLM.prompt()` simply returns a standard LLM response and does not attempt to auto-improve it.
This parameter has no effect when `disable_trustworthiness` is True.
- disable_trustworthiness (bool, default = False): if True, trustworthiness scoring is disabled and TLM will not compute trust scores for responses.
- This is useful when you only want to use custom evaluation criteria or when you want to minimize computational overhead and only need the base LLM response.
- The following parameters will be ignored when `disable_trustworthiness` is True: `num_consistency_samples`, `num_self_reflections`, `num_candidate_responses`, `reasoning_effort`, `similarity_measure`.
-
- prompt: The prompt to use for the TLM call. If not provided, the prompt will be
- generated from the messages.
+ disable_trustworthiness (bool, default = False): if True, TLM will not compute trust scores;
+ useful when you only need custom evaluation criteria.
quality_preset: The quality preset to use for the TLM or Trustworthy RAG API.
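A sketch of the condensed `disable_trustworthiness` behavior, assuming `validate` is addressed by project ID like the other project methods and that `response` accepts a `content` field (the exact `Response`/`Options` shapes live in `project_validate_params`):

```python
result = client.projects.validate(
    "proj_123",  # hypothetical project ID
    context="Acme's return window is 30 days.",
    query="How long do I have to return an item?",
    response={"content": "You can return items within 30 days."},  # assumed shape
    options={"disable_trustworthiness": True},  # skip trust scoring; only
    # custom evaluation criteria configured for the project are computed
)
```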
@@ -625,7 +620,6 @@ def validate(
"eval_scores": eval_scores,
"messages": messages,
"options": options,
- "prompt": prompt,
"quality_preset": quality_preset,
"rewritten_question": rewritten_question,
"task": task,
@@ -688,14 +682,14 @@ async def create(
config: project_create_params.Config,
name: str,
organization_id: str,
- auto_clustering_enabled: bool | NotGiven = NOT_GIVEN,
- description: Optional[str] | NotGiven = NOT_GIVEN,
+ auto_clustering_enabled: bool | Omit = omit,
+ description: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectReturnSchema:
"""
Create a new project.
@@ -736,7 +730,7 @@ async def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectRetrieveResponse:
"""
Get a single project.
@@ -764,16 +758,16 @@ async def update(
self,
project_id: str,
*,
- auto_clustering_enabled: Optional[bool] | NotGiven = NOT_GIVEN,
- config: Optional[project_update_params.Config] | NotGiven = NOT_GIVEN,
- description: Optional[str] | NotGiven = NOT_GIVEN,
- name: Optional[str] | NotGiven = NOT_GIVEN,
+ auto_clustering_enabled: Optional[bool] | Omit = omit,
+ config: Optional[project_update_params.Config] | Omit = omit,
+ description: Optional[str] | Omit = omit,
+ name: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectReturnSchema:
"""
Update a project.
@@ -809,19 +803,19 @@ async def update(
async def list(
self,
*,
- include_unaddressed_counts: bool | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- offset: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- organization_id: str | NotGiven = NOT_GIVEN,
- query: Optional[str] | NotGiven = NOT_GIVEN,
- sort: Literal["created_at", "updated_at"] | NotGiven = NOT_GIVEN,
+ include_unaddressed_counts: bool | Omit = omit,
+ limit: int | Omit = omit,
+ offset: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ organization_id: str | Omit = omit,
+ query: Optional[str] | Omit = omit,
+ sort: Literal["created_at", "updated_at"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectListResponse:
"""
List projects for organization.
@@ -867,7 +861,7 @@ async def delete(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
"""
Delete a project.
@@ -901,7 +895,7 @@ async def export(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> object:
"""
Export all data for a project as a JSON file.
@@ -937,7 +931,7 @@ async def invite_sme(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectInviteSmeResponse:
"""
Invite a subject matter expert to view a specific query log or remediation.
@@ -975,14 +969,14 @@ async def retrieve_analytics(
self,
project_id: str,
*,
- end: int | NotGiven = NOT_GIVEN,
- start: int | NotGiven = NOT_GIVEN,
+ end: int | Omit = omit,
+ start: int | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectRetrieveAnalyticsResponse:
"""
Get Project Analytics Route
@@ -1027,28 +1021,27 @@ async def validate(
context: str,
query: str,
response: project_validate_params.Response,
- use_llm_matching: Optional[bool] | NotGiven = NOT_GIVEN,
- constrain_outputs: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- custom_eval_thresholds: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN,
- custom_metadata: Optional[object] | NotGiven = NOT_GIVEN,
- eval_scores: Optional[Dict[str, float]] | NotGiven = NOT_GIVEN,
- messages: Iterable[project_validate_params.Message] | NotGiven = NOT_GIVEN,
- options: Optional[project_validate_params.Options] | NotGiven = NOT_GIVEN,
- prompt: Optional[str] | NotGiven = NOT_GIVEN,
- quality_preset: Literal["best", "high", "medium", "low", "base"] | NotGiven = NOT_GIVEN,
- rewritten_question: Optional[str] | NotGiven = NOT_GIVEN,
- task: Optional[str] | NotGiven = NOT_GIVEN,
- tools: Optional[Iterable[project_validate_params.Tool]] | NotGiven = NOT_GIVEN,
- x_client_library_version: str | NotGiven = NOT_GIVEN,
- x_integration_type: str | NotGiven = NOT_GIVEN,
- x_source: str | NotGiven = NOT_GIVEN,
- x_stainless_package_version: str | NotGiven = NOT_GIVEN,
+ use_llm_matching: Optional[bool] | Omit = omit,
+ constrain_outputs: Optional[SequenceNotStr[str]] | Omit = omit,
+ custom_eval_thresholds: Optional[Dict[str, float]] | Omit = omit,
+ custom_metadata: Optional[object] | Omit = omit,
+ eval_scores: Optional[Dict[str, float]] | Omit = omit,
+ messages: Iterable[project_validate_params.Message] | Omit = omit,
+ options: Optional[project_validate_params.Options] | Omit = omit,
+ quality_preset: Literal["best", "high", "medium", "low", "base"] | Omit = omit,
+ rewritten_question: Optional[str] | Omit = omit,
+ task: Optional[str] | Omit = omit,
+ tools: Optional[Iterable[project_validate_params.Tool]] | Omit = omit,
+ x_client_library_version: str | Omit = omit,
+ x_integration_type: str | Omit = omit,
+ x_source: str | Omit = omit,
+ x_stainless_package_version: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> ProjectValidateResponse:
"""
Evaluate whether a response, given the provided query and context, is
@@ -1154,12 +1147,8 @@ async def validate(
When this parameter is 1, `TLM.prompt()` simply returns a standard LLM response and does not attempt to auto-improve it.
This parameter has no effect when `disable_trustworthiness` is True.
- disable_trustworthiness (bool, default = False): if True, trustworthiness scoring is disabled and TLM will not compute trust scores for responses.
- This is useful when you only want to use custom evaluation criteria or when you want to minimize computational overhead and only need the base LLM response.
- The following parameters will be ignored when `disable_trustworthiness` is True: `num_consistency_samples`, `num_self_reflections`, `num_candidate_responses`, `reasoning_effort`, `similarity_measure`.
-
- prompt: The prompt to use for the TLM call. If not provided, the prompt will be
- generated from the messages.
+ disable_trustworthiness (bool, default = False): if True, TLM will not compute trust scores;
+ useful when you only need custom evaluation criteria.
quality_preset: The quality preset to use for the TLM or Trustworthy RAG API.
@@ -1203,7 +1192,6 @@ async def validate(
"eval_scores": eval_scores,
"messages": messages,
"options": options,
- "prompt": prompt,
"quality_preset": quality_preset,
"rewritten_question": rewritten_question,
"task": task,
diff --git a/src/codex/resources/projects/query_logs.py b/src/codex/resources/projects/query_logs.py
index 8c939f97..2720dd15 100644
--- a/src/codex/resources/projects/query_logs.py
+++ b/src/codex/resources/projects/query_logs.py
@@ -8,7 +8,7 @@
import httpx
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr
+from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
@@ -73,7 +73,7 @@ def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> QueryLogRetrieveResponse:
"""
Get Query Log Route
@@ -103,29 +103,31 @@ def list(
self,
project_id: str,
*,
- created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- custom_metadata: Optional[str] | NotGiven = NOT_GIVEN,
- failed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- guardrailed: Optional[bool] | NotGiven = NOT_GIVEN,
- has_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- offset: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- passed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
+ created_at_end: Union[str, datetime, None] | Omit = omit,
+ created_at_start: Union[str, datetime, None] | Omit = omit,
+ custom_metadata: Optional[str] | Omit = omit,
+ expert_review_status: Optional[Literal["good", "bad"]] | Omit = omit,
+ failed_evals: Optional[SequenceNotStr[str]] | Omit = omit,
+ guardrailed: Optional[bool] | Omit = omit,
+ has_tool_calls: Optional[bool] | Omit = omit,
+ limit: int | Omit = omit,
+ offset: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ passed_evals: Optional[SequenceNotStr[str]] | Omit = omit,
primary_eval_issue: Optional[
List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
]
- | NotGiven = NOT_GIVEN,
- sort: Optional[str] | NotGiven = NOT_GIVEN,
- tool_call_names: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
+ | Omit = omit,
+ search_text: Optional[str] | Omit = omit,
+ sort: Optional[str] | Omit = omit,
+ tool_call_names: Optional[SequenceNotStr[str]] | Omit = omit,
+ was_cache_hit: Optional[bool] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncOffsetPageQueryLogs[QueryLogListResponse]:
"""
List query logs by project ID.
@@ -137,6 +139,8 @@ def list(
custom_metadata: Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}
+ expert_review_status: Filter by expert review status
+
failed_evals: Filter by evals that failed
guardrailed: Filter by guardrailed status
@@ -147,6 +151,20 @@ def list(
primary_eval_issue: Filter logs that have ANY of these primary evaluation issues (OR operation)
+ search_text: Case-insensitive search across evaluated_response and question fields
+ (original_question if available, otherwise question)
+
+ sort: Field or score to sort by.
+
+ Available fields: 'created_at', 'primary_eval_issue_score'.
+
+ For eval scores, use '.eval.' prefix followed by the eval name.
+
+ Default eval scores: '.eval.trustworthiness', '.eval.context_sufficiency',
+ '.eval.response_helpfulness', '.eval.query_ease', '.eval.response_groundedness'.
+
+ Custom eval scores: '.eval.custom_eval_1', '.eval.custom_eval_2', etc.
+
tool_call_names: Filter by names of tools called in the assistant response
was_cache_hit: Filter by cache hit status
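The new `search_text`, `expert_review_status`, and eval-score `sort` keys compose directly in `list`; a sketch with hypothetical values:

```python
page = client.projects.query_logs.list(
    "proj_123",                    # hypothetical project ID
    search_text="refund",          # case-insensitive match on question/response
    expert_review_status="bad",    # new filter introduced in this diff
    sort=".eval.trustworthiness",  # '.eval.' prefix selects an eval score
    order="asc",                   # lowest trust scores first
    limit=20,
)
for log in page:  # SyncOffsetPageQueryLogs iterates across pages
    ...
```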
@@ -174,6 +192,7 @@ def list(
"created_at_end": created_at_end,
"created_at_start": created_at_start,
"custom_metadata": custom_metadata,
+ "expert_review_status": expert_review_status,
"failed_evals": failed_evals,
"guardrailed": guardrailed,
"has_tool_calls": has_tool_calls,
@@ -182,6 +201,7 @@ def list(
"order": order,
"passed_evals": passed_evals,
"primary_eval_issue": primary_eval_issue,
+ "search_text": search_text,
"sort": sort,
"tool_call_names": tool_call_names,
"was_cache_hit": was_cache_hit,
@@ -203,7 +223,7 @@ def add_user_feedback(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> QueryLogAddUserFeedbackResponse:
"""
Add User Feedback Route
@@ -236,31 +256,33 @@ def list_by_group(
self,
project_id: str,
*,
- created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- custom_metadata: Optional[str] | NotGiven = NOT_GIVEN,
- failed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- guardrailed: Optional[bool] | NotGiven = NOT_GIVEN,
- has_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- needs_review: Optional[bool] | NotGiven = NOT_GIVEN,
- offset: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- passed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
+ created_at_end: Union[str, datetime, None] | Omit = omit,
+ created_at_start: Union[str, datetime, None] | Omit = omit,
+ custom_metadata: Optional[str] | Omit = omit,
+ expert_review_status: Optional[Literal["good", "bad"]] | Omit = omit,
+ failed_evals: Optional[SequenceNotStr[str]] | Omit = omit,
+ guardrailed: Optional[bool] | Omit = omit,
+ has_tool_calls: Optional[bool] | Omit = omit,
+ limit: int | Omit = omit,
+ needs_review: Optional[bool] | Omit = omit,
+ offset: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ passed_evals: Optional[SequenceNotStr[str]] | Omit = omit,
primary_eval_issue: Optional[
List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
]
- | NotGiven = NOT_GIVEN,
- remediation_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
- sort: Optional[str] | NotGiven = NOT_GIVEN,
- tool_call_names: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
+ | Omit = omit,
+ remediation_ids: SequenceNotStr[str] | Omit = omit,
+ search_text: Optional[str] | Omit = omit,
+ sort: Optional[str] | Omit = omit,
+ tool_call_names: Optional[SequenceNotStr[str]] | Omit = omit,
+ was_cache_hit: Optional[bool] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> QueryLogListByGroupResponse:
"""
List query log groups by remediation ID.
@@ -272,6 +294,8 @@ def list_by_group(
custom_metadata: Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}
+ expert_review_status: Filter by expert review status
+
failed_evals: Filter by evals that failed
guardrailed: Filter by guardrailed status
@@ -286,6 +310,20 @@ def list_by_group(
remediation_ids: List of groups to list child logs for
+ search_text: Case-insensitive search across evaluated_response and question fields
+ (original_question if available, otherwise question)
+
+ sort: Field or score to sort by.
+
+ Available fields: 'created_at', 'primary_eval_issue_score'.
+
+ For eval scores, use '.eval.' prefix followed by the eval name.
+
+ Default eval scores: '.eval.trustworthiness', '.eval.context_sufficiency',
+ '.eval.response_helpfulness', '.eval.query_ease', '.eval.response_groundedness'.
+
+ Custom eval scores: '.eval.custom_eval_1', '.eval.custom_eval_2', etc.
+
tool_call_names: Filter by names of tools called in the assistant response
was_cache_hit: Filter by cache hit status
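`list_by_group` takes the same additions plus `remediation_ids`; for instance (all IDs hypothetical):

```python
grouped = client.projects.query_logs.list_by_group(
    "proj_123",
    remediation_ids=["rem_abc", "rem_def"],  # groups to list child logs for
    search_text="shipping",
    sort=".eval.response_helpfulness",
    order="desc",
)
```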
@@ -312,6 +350,7 @@ def list_by_group(
"created_at_end": created_at_end,
"created_at_start": created_at_start,
"custom_metadata": custom_metadata,
+ "expert_review_status": expert_review_status,
"failed_evals": failed_evals,
"guardrailed": guardrailed,
"has_tool_calls": has_tool_calls,
@@ -322,6 +361,7 @@ def list_by_group(
"passed_evals": passed_evals,
"primary_eval_issue": primary_eval_issue,
"remediation_ids": remediation_ids,
+ "search_text": search_text,
"sort": sort,
"tool_call_names": tool_call_names,
"was_cache_hit": was_cache_hit,
@@ -336,31 +376,32 @@ def list_groups(
self,
project_id: str,
*,
- created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- custom_metadata: Optional[str] | NotGiven = NOT_GIVEN,
- failed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- guardrailed: Optional[bool] | NotGiven = NOT_GIVEN,
- has_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- needs_review: Optional[bool] | NotGiven = NOT_GIVEN,
- offset: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- passed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
+ created_at_end: Union[str, datetime, None] | Omit = omit,
+ created_at_start: Union[str, datetime, None] | Omit = omit,
+ custom_metadata: Optional[str] | Omit = omit,
+ expert_review_status: Optional[Literal["good", "bad"]] | Omit = omit,
+ failed_evals: Optional[SequenceNotStr[str]] | Omit = omit,
+ guardrailed: Optional[bool] | Omit = omit,
+ has_tool_calls: Optional[bool] | Omit = omit,
+ limit: int | Omit = omit,
+ needs_review: Optional[bool] | Omit = omit,
+ offset: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ passed_evals: Optional[SequenceNotStr[str]] | Omit = omit,
primary_eval_issue: Optional[
List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
]
- | NotGiven = NOT_GIVEN,
- sort: Optional[Literal["created_at", "primary_eval_issue_score", "total_count", "custom_rank", "impact_score"]]
- | NotGiven = NOT_GIVEN,
- tool_call_names: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
+ | Omit = omit,
+ search_text: Optional[str] | Omit = omit,
+ sort: Optional[str] | Omit = omit,
+ tool_call_names: Optional[SequenceNotStr[str]] | Omit = omit,
+ was_cache_hit: Optional[bool] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncOffsetPageQueryLogGroups[QueryLogListGroupsResponse]:
"""
List query log groups by project ID.
@@ -372,6 +413,8 @@ def list_groups(
custom_metadata: Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}
+ expert_review_status: Filter by expert review status
+
failed_evals: Filter by evals that failed
guardrailed: Filter by guardrailed status
@@ -384,6 +427,21 @@ def list_groups(
primary_eval_issue: Filter logs that have ANY of these primary evaluation issues (OR operation)
+ search_text: Case-insensitive search across evaluated_response and question fields
+ (original_question if available, otherwise question)
+
+ sort: Field or score to sort by.
+
+ Available fields: 'created_at', 'custom_rank', 'impact_score',
+ 'primary_eval_issue_score', 'total_count'.
+
+ For eval scores, use '.eval.' prefix followed by the eval name.
+
+ Default eval scores: '.eval.trustworthiness', '.eval.context_sufficiency',
+ '.eval.response_helpfulness', '.eval.query_ease', '.eval.response_groundedness'.
+
+ Custom eval scores: '.eval.custom_eval_1', '.eval.custom_eval_2', etc.
+
tool_call_names: Filter by names of tools called in the assistant response
was_cache_hit: Filter by cache hit status
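For `list_groups`, `sort` is widened from a `Literal` to `Optional[str]`, so the group-only fields and eval-score keys share one parameter; a sketch:

```python
group_page = client.projects.query_logs.list_groups(
    "proj_123",                  # hypothetical project ID
    sort="impact_score",         # group-level field, like custom_rank/total_count
    order="desc",
    expert_review_status="good",
)
```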
@@ -411,6 +469,7 @@ def list_groups(
"created_at_end": created_at_end,
"created_at_start": created_at_start,
"custom_metadata": custom_metadata,
+ "expert_review_status": expert_review_status,
"failed_evals": failed_evals,
"guardrailed": guardrailed,
"has_tool_calls": has_tool_calls,
@@ -420,6 +479,7 @@ def list_groups(
"order": order,
"passed_evals": passed_evals,
"primary_eval_issue": primary_eval_issue,
+ "search_text": search_text,
"sort": sort,
"tool_call_names": tool_call_names,
"was_cache_hit": was_cache_hit,
@@ -440,7 +500,7 @@ def start_remediation(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> QueryLogStartRemediationResponse:
"""
Start Remediation Route
@@ -477,7 +537,7 @@ def update_metadata(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> QueryLogUpdateMetadataResponse:
"""
Update Metadata Route
@@ -535,7 +595,7 @@ async def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> QueryLogRetrieveResponse:
"""
Get Query Log Route
@@ -565,29 +625,31 @@ def list(
self,
project_id: str,
*,
- created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- custom_metadata: Optional[str] | NotGiven = NOT_GIVEN,
- failed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- guardrailed: Optional[bool] | NotGiven = NOT_GIVEN,
- has_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- offset: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- passed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
+ created_at_end: Union[str, datetime, None] | Omit = omit,
+ created_at_start: Union[str, datetime, None] | Omit = omit,
+ custom_metadata: Optional[str] | Omit = omit,
+ expert_review_status: Optional[Literal["good", "bad"]] | Omit = omit,
+ failed_evals: Optional[SequenceNotStr[str]] | Omit = omit,
+ guardrailed: Optional[bool] | Omit = omit,
+ has_tool_calls: Optional[bool] | Omit = omit,
+ limit: int | Omit = omit,
+ offset: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ passed_evals: Optional[SequenceNotStr[str]] | Omit = omit,
primary_eval_issue: Optional[
List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
]
- | NotGiven = NOT_GIVEN,
- sort: Optional[str] | NotGiven = NOT_GIVEN,
- tool_call_names: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
+ | Omit = omit,
+ search_text: Optional[str] | Omit = omit,
+ sort: Optional[str] | Omit = omit,
+ tool_call_names: Optional[SequenceNotStr[str]] | Omit = omit,
+ was_cache_hit: Optional[bool] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[QueryLogListResponse, AsyncOffsetPageQueryLogs[QueryLogListResponse]]:
"""
List query logs by project ID.
@@ -599,6 +661,8 @@ def list(
custom_metadata: Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}
+ expert_review_status: Filter by expert review status
+
failed_evals: Filter by evals that failed
guardrailed: Filter by guardrailed status
@@ -609,6 +673,20 @@ def list(
primary_eval_issue: Filter logs that have ANY of these primary evaluation issues (OR operation)
+ search_text: Case-insensitive search across evaluated_response and question fields
+ (original_question if available, otherwise question)
+
+ sort: Field or score to sort by.
+
+ Available fields: 'created_at', 'primary_eval_issue_score'.
+
+ For eval scores, use '.eval.' prefix followed by the eval name.
+
+ Default eval scores: '.eval.trustworthiness', '.eval.context_sufficiency',
+ '.eval.response_helpfulness', '.eval.query_ease', '.eval.response_groundedness'.
+
+ Custom eval scores: '.eval.custom_eval_1', '.eval.custom_eval_2', etc.
+
tool_call_names: Filter by names of tools called in the assistant response
was_cache_hit: Filter by cache hit status
@@ -636,6 +714,7 @@ def list(
"created_at_end": created_at_end,
"created_at_start": created_at_start,
"custom_metadata": custom_metadata,
+ "expert_review_status": expert_review_status,
"failed_evals": failed_evals,
"guardrailed": guardrailed,
"has_tool_calls": has_tool_calls,
@@ -644,6 +723,7 @@ def list(
"order": order,
"passed_evals": passed_evals,
"primary_eval_issue": primary_eval_issue,
+ "search_text": search_text,
"sort": sort,
"tool_call_names": tool_call_names,
"was_cache_hit": was_cache_hit,
@@ -665,7 +745,7 @@ async def add_user_feedback(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> QueryLogAddUserFeedbackResponse:
"""
Add User Feedback Route
@@ -700,31 +780,33 @@ async def list_by_group(
self,
project_id: str,
*,
- created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- custom_metadata: Optional[str] | NotGiven = NOT_GIVEN,
- failed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- guardrailed: Optional[bool] | NotGiven = NOT_GIVEN,
- has_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- needs_review: Optional[bool] | NotGiven = NOT_GIVEN,
- offset: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- passed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
+ created_at_end: Union[str, datetime, None] | Omit = omit,
+ created_at_start: Union[str, datetime, None] | Omit = omit,
+ custom_metadata: Optional[str] | Omit = omit,
+ expert_review_status: Optional[Literal["good", "bad"]] | Omit = omit,
+ failed_evals: Optional[SequenceNotStr[str]] | Omit = omit,
+ guardrailed: Optional[bool] | Omit = omit,
+ has_tool_calls: Optional[bool] | Omit = omit,
+ limit: int | Omit = omit,
+ needs_review: Optional[bool] | Omit = omit,
+ offset: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ passed_evals: Optional[SequenceNotStr[str]] | Omit = omit,
primary_eval_issue: Optional[
List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
]
- | NotGiven = NOT_GIVEN,
- remediation_ids: SequenceNotStr[str] | NotGiven = NOT_GIVEN,
- sort: Optional[str] | NotGiven = NOT_GIVEN,
- tool_call_names: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
+ | Omit = omit,
+ remediation_ids: SequenceNotStr[str] | Omit = omit,
+ search_text: Optional[str] | Omit = omit,
+ sort: Optional[str] | Omit = omit,
+ tool_call_names: Optional[SequenceNotStr[str]] | Omit = omit,
+ was_cache_hit: Optional[bool] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> QueryLogListByGroupResponse:
"""
List query log groups by remediation ID.
@@ -736,6 +818,8 @@ async def list_by_group(
custom_metadata: Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}
+ expert_review_status: Filter by expert review status
+
failed_evals: Filter by evals that failed
guardrailed: Filter by guardrailed status
@@ -750,6 +834,20 @@ async def list_by_group(
remediation_ids: List of groups to list child logs for
+ search_text: Case-insensitive search across evaluated_response and question fields
+ (original_question if available, otherwise question)
+
+ sort: Field or score to sort by.
+
+ Available fields: 'created_at', 'primary_eval_issue_score'.
+
+ For eval scores, use '.eval.' prefix followed by the eval name.
+
+ Default eval scores: '.eval.trustworthiness', '.eval.context_sufficiency',
+ '.eval.response_helpfulness', '.eval.query_ease', '.eval.response_groundedness'.
+
+ Custom eval scores: '.eval.custom_eval_1', '.eval.custom_eval_2', etc.
+
tool_call_names: Filter by names of tools called in the assistant response
was_cache_hit: Filter by cache hit status
@@ -776,6 +874,7 @@ async def list_by_group(
"created_at_end": created_at_end,
"created_at_start": created_at_start,
"custom_metadata": custom_metadata,
+ "expert_review_status": expert_review_status,
"failed_evals": failed_evals,
"guardrailed": guardrailed,
"has_tool_calls": has_tool_calls,
@@ -786,6 +885,7 @@ async def list_by_group(
"passed_evals": passed_evals,
"primary_eval_issue": primary_eval_issue,
"remediation_ids": remediation_ids,
+ "search_text": search_text,
"sort": sort,
"tool_call_names": tool_call_names,
"was_cache_hit": was_cache_hit,
@@ -800,31 +900,32 @@ def list_groups(
self,
project_id: str,
*,
- created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- custom_metadata: Optional[str] | NotGiven = NOT_GIVEN,
- failed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- guardrailed: Optional[bool] | NotGiven = NOT_GIVEN,
- has_tool_calls: Optional[bool] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- needs_review: Optional[bool] | NotGiven = NOT_GIVEN,
- offset: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- passed_evals: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
+ created_at_end: Union[str, datetime, None] | Omit = omit,
+ created_at_start: Union[str, datetime, None] | Omit = omit,
+ custom_metadata: Optional[str] | Omit = omit,
+ expert_review_status: Optional[Literal["good", "bad"]] | Omit = omit,
+ failed_evals: Optional[SequenceNotStr[str]] | Omit = omit,
+ guardrailed: Optional[bool] | Omit = omit,
+ has_tool_calls: Optional[bool] | Omit = omit,
+ limit: int | Omit = omit,
+ needs_review: Optional[bool] | Omit = omit,
+ offset: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ passed_evals: Optional[SequenceNotStr[str]] | Omit = omit,
primary_eval_issue: Optional[
List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
]
- | NotGiven = NOT_GIVEN,
- sort: Optional[Literal["created_at", "primary_eval_issue_score", "total_count", "custom_rank", "impact_score"]]
- | NotGiven = NOT_GIVEN,
- tool_call_names: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- was_cache_hit: Optional[bool] | NotGiven = NOT_GIVEN,
+ | Omit = omit,
+ search_text: Optional[str] | Omit = omit,
+ sort: Optional[str] | Omit = omit,
+ tool_call_names: Optional[SequenceNotStr[str]] | Omit = omit,
+ was_cache_hit: Optional[bool] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[QueryLogListGroupsResponse, AsyncOffsetPageQueryLogGroups[QueryLogListGroupsResponse]]:
"""
List query log groups by project ID.
@@ -836,6 +937,8 @@ def list_groups(
custom_metadata: Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}
+ expert_review_status: Filter by expert review status
+
failed_evals: Filter by evals that failed
guardrailed: Filter by guardrailed status
@@ -848,6 +951,21 @@ def list_groups(
primary_eval_issue: Filter logs that have ANY of these primary evaluation issues (OR operation)
+ search_text: Case-insensitive search across evaluated_response and question fields
+ (original_question if available, otherwise question)
+
+ sort: Field or score to sort by.
+
+ Available fields: 'created_at', 'custom_rank', 'impact_score',
+ 'primary_eval_issue_score', 'total_count'.
+
+ For eval scores, use '.eval.' prefix followed by the eval name.
+
+ Default eval scores: '.eval.trustworthiness', '.eval.context_sufficiency',
+ '.eval.response_helpfulness', '.eval.query_ease', '.eval.response_groundedness'.
+
+ Custom eval scores: '.eval.custom_eval_1', '.eval.custom_eval_2', etc.
+
tool_call_names: Filter by names of tools called in the assistant response
was_cache_hit: Filter by cache hit status
@@ -875,6 +993,7 @@ def list_groups(
"created_at_end": created_at_end,
"created_at_start": created_at_start,
"custom_metadata": custom_metadata,
+ "expert_review_status": expert_review_status,
"failed_evals": failed_evals,
"guardrailed": guardrailed,
"has_tool_calls": has_tool_calls,
@@ -884,6 +1003,7 @@ def list_groups(
"order": order,
"passed_evals": passed_evals,
"primary_eval_issue": primary_eval_issue,
+ "search_text": search_text,
"sort": sort,
"tool_call_names": tool_call_names,
"was_cache_hit": was_cache_hit,
@@ -904,7 +1024,7 @@ async def start_remediation(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> QueryLogStartRemediationResponse:
"""
Start Remediation Route
@@ -941,7 +1061,7 @@ async def update_metadata(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> QueryLogUpdateMetadataResponse:
"""
Update Metadata Route
diff --git a/src/codex/resources/projects/remediations.py b/src/codex/resources/projects/remediations.py
index ea0168c7..11642624 100644
--- a/src/codex/resources/projects/remediations.py
+++ b/src/codex/resources/projects/remediations.py
@@ -8,7 +8,7 @@
import httpx
-from ..._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
+from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
@@ -65,14 +65,14 @@ def create(
project_id: str,
*,
question: str,
- answer: Optional[str] | NotGiven = NOT_GIVEN,
- draft_answer: Optional[str] | NotGiven = NOT_GIVEN,
+ answer: Optional[str] | Omit = omit,
+ draft_answer: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationCreateResponse:
"""
Create Remediation Route
@@ -114,7 +114,7 @@ def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationRetrieveResponse:
"""
Get Remediation Route
@@ -144,22 +144,22 @@ def list(
self,
project_id: str,
*,
- created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- last_edited_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- last_edited_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- last_edited_by: Optional[str] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- offset: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- sort: Optional[Literal["created_at", "last_edited_at", "resolved_logs_count"]] | NotGiven = NOT_GIVEN,
- status: Optional[List[Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "PAUSED"]]] | NotGiven = NOT_GIVEN,
+ created_at_end: Union[str, datetime, None] | Omit = omit,
+ created_at_start: Union[str, datetime, None] | Omit = omit,
+ last_edited_at_end: Union[str, datetime, None] | Omit = omit,
+ last_edited_at_start: Union[str, datetime, None] | Omit = omit,
+ last_edited_by: Optional[str] | Omit = omit,
+ limit: int | Omit = omit,
+ offset: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ sort: Optional[Literal["created_at", "last_edited_at", "resolved_logs_count"]] | Omit = omit,
+ status: Optional[List[Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "PAUSED"]]] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> SyncOffsetPageRemediations[RemediationListResponse]:
"""
List remediations by project ID.
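The remediation list filters read like this in practice (project ID hypothetical):

```python
remediations = client.projects.remediations.list(
    "proj_123",
    status=["ACTIVE", "ACTIVE_WITH_DRAFT"],  # any of the Literal statuses
    sort="resolved_logs_count",
    order="desc",
    limit=50,
)
```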
@@ -224,7 +224,7 @@ def delete(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
"""
Delete Remediation Route
@@ -262,7 +262,7 @@ def edit_answer(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationEditAnswerResponse:
"""
Edit Answer Route
@@ -300,7 +300,7 @@ def edit_draft_answer(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationEditDraftAnswerResponse:
"""
Edit Draft Answer Route
@@ -339,7 +339,7 @@ def get_resolved_logs_count(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationGetResolvedLogsCountResponse:
"""
Get Remediation With Resolved Logs Count Route
@@ -375,7 +375,7 @@ def list_resolved_logs(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationListResolvedLogsResponse:
"""
List resolved logs by remediation ID.
@@ -411,7 +411,7 @@ def pause(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationPauseResponse:
"""
Pause Remediation Route
@@ -447,7 +447,7 @@ def publish(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationPublishResponse:
"""
Publish Remediation Route
@@ -483,7 +483,7 @@ def unpause(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationUnpauseResponse:
"""
Unpause Remediation Route
@@ -535,14 +535,14 @@ async def create(
project_id: str,
*,
question: str,
- answer: Optional[str] | NotGiven = NOT_GIVEN,
- draft_answer: Optional[str] | NotGiven = NOT_GIVEN,
+ answer: Optional[str] | Omit = omit,
+ draft_answer: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationCreateResponse:
"""
Create Remediation Route
@@ -584,7 +584,7 @@ async def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationRetrieveResponse:
"""
Get Remediation Route
@@ -614,22 +614,22 @@ def list(
self,
project_id: str,
*,
- created_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- created_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- last_edited_at_end: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- last_edited_at_start: Union[str, datetime, None] | NotGiven = NOT_GIVEN,
- last_edited_by: Optional[str] | NotGiven = NOT_GIVEN,
- limit: int | NotGiven = NOT_GIVEN,
- offset: int | NotGiven = NOT_GIVEN,
- order: Literal["asc", "desc"] | NotGiven = NOT_GIVEN,
- sort: Optional[Literal["created_at", "last_edited_at", "resolved_logs_count"]] | NotGiven = NOT_GIVEN,
- status: Optional[List[Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "PAUSED"]]] | NotGiven = NOT_GIVEN,
+ created_at_end: Union[str, datetime, None] | Omit = omit,
+ created_at_start: Union[str, datetime, None] | Omit = omit,
+ last_edited_at_end: Union[str, datetime, None] | Omit = omit,
+ last_edited_at_start: Union[str, datetime, None] | Omit = omit,
+ last_edited_by: Optional[str] | Omit = omit,
+ limit: int | Omit = omit,
+ offset: int | Omit = omit,
+ order: Literal["asc", "desc"] | Omit = omit,
+ sort: Optional[Literal["created_at", "last_edited_at", "resolved_logs_count"]] | Omit = omit,
+ status: Optional[List[Literal["ACTIVE", "DRAFT", "ACTIVE_WITH_DRAFT", "PAUSED"]]] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> AsyncPaginator[RemediationListResponse, AsyncOffsetPageRemediations[RemediationListResponse]]:
"""
List remediations by project ID.
@@ -694,7 +694,7 @@ async def delete(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> None:
"""
Delete Remediation Route
@@ -732,7 +732,7 @@ async def edit_answer(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationEditAnswerResponse:
"""
Edit Answer Route
@@ -772,7 +772,7 @@ async def edit_draft_answer(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationEditDraftAnswerResponse:
"""
Edit Draft Answer Route
@@ -811,7 +811,7 @@ async def get_resolved_logs_count(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationGetResolvedLogsCountResponse:
"""
Get Remediation With Resolved Logs Count Route
@@ -847,7 +847,7 @@ async def list_resolved_logs(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationListResolvedLogsResponse:
"""
List resolved logs by remediation ID.
@@ -883,7 +883,7 @@ async def pause(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationPauseResponse:
"""
Pause Remediation Route
@@ -919,7 +919,7 @@ async def publish(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationPublishResponse:
"""
Publish Remediation Route
@@ -955,7 +955,7 @@ async def unpause(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> RemediationUnpauseResponse:
"""
Unpause Remediation Route
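The recurring change across this file is a sentinel swap for optional request parameters, from `NotGiven`/`NOT_GIVEN` to `Omit`/`omit`, while `timeout` keeps its `NotGiven` annotation and merely switches to the lowercase `not_given` instance. Either sentinel exists to distinguish "caller never passed this" from an explicit `None`. A minimal sketch of the pattern, with illustrative names rather than the SDK's actual internals:

```python
from typing import Any, Dict


class Omit:
    """Sentinel type: the parameter should be left out of the request entirely."""

    def __repr__(self) -> str:
        return "omit"


omit = Omit()


def build_query(**params: Any) -> Dict[str, Any]:
    # Drop every parameter still set to the sentinel, so that an omitted
    # argument is distinguishable from one explicitly set to None.
    return {k: v for k, v in params.items() if not isinstance(v, Omit)}


print(build_query(limit=10, offset=omit, order="desc"))
# -> {'limit': 10, 'order': 'desc'}
```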
diff --git a/src/codex/resources/tlm.py b/src/codex/resources/tlm.py
deleted file mode 100644
index de652ef9..00000000
--- a/src/codex/resources/tlm.py
+++ /dev/null
@@ -1,677 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Optional
-from typing_extensions import Literal
-
-import httpx
-
-from ..types import tlm_score_params, tlm_prompt_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven, SequenceNotStr
-from .._utils import maybe_transform, async_maybe_transform
-from .._compat import cached_property
-from .._resource import SyncAPIResource, AsyncAPIResource
-from .._response import (
- to_raw_response_wrapper,
- to_streamed_response_wrapper,
- async_to_raw_response_wrapper,
- async_to_streamed_response_wrapper,
-)
-from .._base_client import make_request_options
-from ..types.tlm_score_response import TlmScoreResponse
-from ..types.tlm_prompt_response import TlmPromptResponse
-
-__all__ = ["TlmResource", "AsyncTlmResource"]
-
-
-class TlmResource(SyncAPIResource):
- @cached_property
- def with_raw_response(self) -> TlmResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/cleanlab/codex-python#accessing-raw-response-data-eg-headers
- """
- return TlmResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> TlmResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/cleanlab/codex-python#with_streaming_response
- """
- return TlmResourceWithStreamingResponse(self)
-
- def prompt(
- self,
- *,
- prompt: str,
- constrain_outputs: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- options: Optional[tlm_prompt_params.Options] | NotGiven = NOT_GIVEN,
- quality_preset: Literal["best", "high", "medium", "low", "base"] | NotGiven = NOT_GIVEN,
- task: Optional[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> TlmPromptResponse:
- """
- Prompts the TLM API.
-
- Args:
- options: Typed dict of advanced configuration options for the Trustworthy Language Model.
- Many of these configurations are determined by the quality preset selected
- (learn about quality presets in the TLM [initialization method](./#class-tlm)).
- Specifying TLMOptions values directly overrides any default values set from the
- quality preset.
-
- For all options described below, higher settings will lead to longer runtimes
- and may consume more tokens internally. You may not be able to run long prompts
- (or prompts with long responses) in your account, unless your token/rate limits
- are increased. If you hit token limit issues, try lower/less expensive
- TLMOptions to be able to run longer prompts/responses, or contact Cleanlab to
- increase your limits.
-
- The default values corresponding to each quality preset are:
-
- - **best:** `num_consistency_samples` = 8, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **high:** `num_consistency_samples` = 4, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **medium:** `num_consistency_samples` = 0, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **low:** `num_consistency_samples` = 0, `num_self_reflections` = 3,
- `reasoning_effort` = `"none"`.
- - **base:** `num_consistency_samples` = 0, `num_self_reflections` = 1,
- `reasoning_effort` = `"none"`.
-
- By default, TLM uses the: "medium" `quality_preset`, "gpt-4.1-mini" base
- `model`, and `max_tokens` is set to 512. You can set custom values for these
- arguments regardless of the quality preset specified.
-
- Args: model ({"gpt-5", "gpt-5-mini", "gpt-5-nano", "gpt-4.1", "gpt-4.1-mini",
- "gpt-4.1-nano", "o4-mini", "o3", "gpt-4.5-preview", "gpt-4o-mini", "gpt-4o",
- "o3-mini", "o1", "o1-mini", "gpt-4", "gpt-3.5-turbo-16k", "claude-opus-4-0",
- "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-sonnet-v2",
- "claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
- "nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
- (better models yield better results, faster models yield faster results). -
- Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
- "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
- "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
- "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
- for low latency/costs: "gpt-4.1-nano", "nova-micro".
-
- log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
- For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
-
- custom_eval_criteria (list[dict[str, Any]], default = []): optionally specify custom evaluation criteria beyond the built-in trustworthiness scoring.
- The expected input format is a list of dictionaries, where each dictionary has the following keys:
- - name: Name of the evaluation criteria.
- - criteria: Instructions specifying the evaluation criteria.
-
- max_tokens (int, default = 512): the maximum number of tokens that can be generated in the response from `TLM.prompt()` as well as during internal trustworthiness scoring.
- If you experience token/rate-limit errors, try lowering this number.
- For OpenAI models, this parameter must be between 64 and 4096. For Claude models, this parameter must be between 64 and 512.
-
- reasoning_effort ({"none", "low", "medium", "high"}, default = "high"): how much internal LLM calls are allowed to reason (number of thinking tokens)
- when generating alternative possible responses and reflecting on responses during trustworthiness scoring.
- Reduce this value to reduce runtimes. Higher values may improve trust scoring.
-
- num_self_reflections (int, default = 3): the number of different evaluations to perform where the LLM reflects on the response, a factor affecting trust scoring.
- The maximum number currently supported is 3. Lower values can reduce runtimes.
- Reflection helps quantify aleatoric uncertainty associated with challenging prompts and catches responses that are noticeably incorrect/bad upon further analysis.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- num_consistency_samples (int, default = 8): the amount of internal sampling to measure LLM response consistency, a factor affecting trust scoring.
- Must be between 0 and 20. Lower values can reduce runtimes.
- Measuring consistency helps quantify the epistemic uncertainty associated with
- strange prompts or prompts that are too vague/open-ended to receive a clearly defined 'good' response.
- TLM measures consistency via the degree of contradiction between sampled responses that the model considers plausible.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- similarity_measure ({"semantic", "string", "embedding", "embedding_large", "code", "discrepancy"}, default = "discrepancy"): how the
- trustworthiness scoring's consistency algorithm measures similarity between alternative responses considered plausible by the model.
- Supported similarity measures include - "semantic" (based on natural language inference),
- "embedding" (based on vector embedding similarity), "embedding_large" (based on a larger embedding model),
- "code" (based on model-based analysis designed to compare code), "discrepancy" (based on model-based analysis of possible discrepancies),
- and "string" (based on character/word overlap). Set this to "string" for minimal runtimes.
- This parameter has no effect when `num_consistency_samples = 0`.
-
- num_candidate_responses (int, default = 1): how many alternative candidate responses are internally generated in `TLM.prompt()`.
- `TLM.prompt()` scores the trustworthiness of each candidate response, and then returns the most trustworthy one.
- You can auto-improve responses by increasing this parameter, but at higher runtimes/costs.
- This parameter must be between 1 and 20. It has no effect on `TLM.score()`.
- When this parameter is 1, `TLM.prompt()` simply returns a standard LLM response and does not attempt to auto-improve it.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- disable_trustworthiness (bool, default = False): if True, trustworthiness scoring is disabled and TLM will not compute trust scores for responses.
- This is useful when you only want to use custom evaluation criteria or when you want to minimize computational overhead and only need the base LLM response.
- The following parameters will be ignored when `disable_trustworthiness` is True: `num_consistency_samples`, `num_self_reflections`, `num_candidate_responses`, `reasoning_effort`, `similarity_measure`.
-
- quality_preset: The quality preset to use for the TLM or Trustworthy RAG API.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/api/tlm/prompt",
- body=maybe_transform(
- {
- "prompt": prompt,
- "constrain_outputs": constrain_outputs,
- "options": options,
- "quality_preset": quality_preset,
- "task": task,
- },
- tlm_prompt_params.TlmPromptParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=TlmPromptResponse,
- )
-
- def score(
- self,
- *,
- prompt: str,
- response: str,
- constrain_outputs: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- options: Optional[tlm_score_params.Options] | NotGiven = NOT_GIVEN,
- quality_preset: Literal["best", "high", "medium", "low", "base"] | NotGiven = NOT_GIVEN,
- task: Optional[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> TlmScoreResponse:
- """
- Scores the TLM API.
-
- TODO:
-
- - Track query count in DB
- - Enforce hard cap on queries for users w/o credit card on file
-
- Args:
- options: Typed dict of advanced configuration options for the Trustworthy Language Model.
- Many of these configurations are determined by the quality preset selected
- (learn about quality presets in the TLM [initialization method](./#class-tlm)).
- Specifying TLMOptions values directly overrides any default values set from the
- quality preset.
-
- For all options described below, higher settings will lead to longer runtimes
- and may consume more tokens internally. You may not be able to run long prompts
- (or prompts with long responses) in your account, unless your token/rate limits
- are increased. If you hit token limit issues, try lower/less expensive
- TLMOptions to be able to run longer prompts/responses, or contact Cleanlab to
- increase your limits.
-
- The default values corresponding to each quality preset are:
-
- - **best:** `num_consistency_samples` = 8, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **high:** `num_consistency_samples` = 4, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **medium:** `num_consistency_samples` = 0, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **low:** `num_consistency_samples` = 0, `num_self_reflections` = 3,
- `reasoning_effort` = `"none"`.
- - **base:** `num_consistency_samples` = 0, `num_self_reflections` = 1,
- `reasoning_effort` = `"none"`.
-
- By default, TLM uses the: "medium" `quality_preset`, "gpt-4.1-mini" base
- `model`, and `max_tokens` is set to 512. You can set custom values for these
- arguments regardless of the quality preset specified.
-
- Args: model ({"gpt-5", "gpt-5-mini", "gpt-5-nano", "gpt-4.1", "gpt-4.1-mini",
- "gpt-4.1-nano", "o4-mini", "o3", "gpt-4.5-preview", "gpt-4o-mini", "gpt-4o",
- "o3-mini", "o1", "o1-mini", "gpt-4", "gpt-3.5-turbo-16k", "claude-opus-4-0",
- "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-sonnet-v2",
- "claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
- "nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
- (better models yield better results, faster models yield faster results). -
- Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
- "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
- "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
- "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
- for low latency/costs: "gpt-4.1-nano", "nova-micro".
-
- log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
- For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
-
- custom_eval_criteria (list[dict[str, Any]], default = []): optionally specify custom evaluation criteria beyond the built-in trustworthiness scoring.
- The expected input format is a list of dictionaries, where each dictionary has the following keys:
- - name: Name of the evaluation criteria.
- - criteria: Instructions specifying the evaluation criteria.
-
- max_tokens (int, default = 512): the maximum number of tokens that can be generated in the response from `TLM.prompt()` as well as during internal trustworthiness scoring.
- If you experience token/rate-limit errors, try lowering this number.
- For OpenAI models, this parameter must be between 64 and 4096. For Claude models, this parameter must be between 64 and 512.
-
- reasoning_effort ({"none", "low", "medium", "high"}, default = "high"): how much internal LLM calls are allowed to reason (number of thinking tokens)
- when generating alternative possible responses and reflecting on responses during trustworthiness scoring.
- Reduce this value to reduce runtimes. Higher values may improve trust scoring.
-
- num_self_reflections (int, default = 3): the number of different evaluations to perform where the LLM reflects on the response, a factor affecting trust scoring.
- The maximum number currently supported is 3. Lower values can reduce runtimes.
- Reflection helps quantify aleatoric uncertainty associated with challenging prompts and catches responses that are noticeably incorrect/bad upon further analysis.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- num_consistency_samples (int, default = 8): the amount of internal sampling to measure LLM response consistency, a factor affecting trust scoring.
- Must be between 0 and 20. Lower values can reduce runtimes.
- Measuring consistency helps quantify the epistemic uncertainty associated with
- strange prompts or prompts that are too vague/open-ended to receive a clearly defined 'good' response.
- TLM measures consistency via the degree of contradiction between sampled responses that the model considers plausible.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- similarity_measure ({"semantic", "string", "embedding", "embedding_large", "code", "discrepancy"}, default = "discrepancy"): how the
- trustworthiness scoring's consistency algorithm measures similarity between alternative responses considered plausible by the model.
- Supported similarity measures include - "semantic" (based on natural language inference),
- "embedding" (based on vector embedding similarity), "embedding_large" (based on a larger embedding model),
- "code" (based on model-based analysis designed to compare code), "discrepancy" (based on model-based analysis of possible discrepancies),
- and "string" (based on character/word overlap). Set this to "string" for minimal runtimes.
- This parameter has no effect when `num_consistency_samples = 0`.
-
- num_candidate_responses (int, default = 1): how many alternative candidate responses are internally generated in `TLM.prompt()`.
- `TLM.prompt()` scores the trustworthiness of each candidate response, and then returns the most trustworthy one.
- You can auto-improve responses by increasing this parameter, but at higher runtimes/costs.
- This parameter must be between 1 and 20. It has no effect on `TLM.score()`.
- When this parameter is 1, `TLM.prompt()` simply returns a standard LLM response and does not attempt to auto-improve it.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- disable_trustworthiness (bool, default = False): if True, trustworthiness scoring is disabled and TLM will not compute trust scores for responses.
- This is useful when you only want to use custom evaluation criteria or when you want to minimize computational overhead and only need the base LLM response.
- The following parameters will be ignored when `disable_trustworthiness` is True: `num_consistency_samples`, `num_self_reflections`, `num_candidate_responses`, `reasoning_effort`, `similarity_measure`.
-
- quality_preset: The quality preset to use for the TLM or Trustworthy RAG API.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return self._post(
- "/api/tlm/score",
- body=maybe_transform(
- {
- "prompt": prompt,
- "response": response,
- "constrain_outputs": constrain_outputs,
- "options": options,
- "quality_preset": quality_preset,
- "task": task,
- },
- tlm_score_params.TlmScoreParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=TlmScoreResponse,
- )
-
-
-class AsyncTlmResource(AsyncAPIResource):
- @cached_property
- def with_raw_response(self) -> AsyncTlmResourceWithRawResponse:
- """
- This property can be used as a prefix for any HTTP method call to return
- the raw response object instead of the parsed content.
-
- For more information, see https://www.github.com/cleanlab/codex-python#accessing-raw-response-data-eg-headers
- """
- return AsyncTlmResourceWithRawResponse(self)
-
- @cached_property
- def with_streaming_response(self) -> AsyncTlmResourceWithStreamingResponse:
- """
- An alternative to `.with_raw_response` that doesn't eagerly read the response body.
-
- For more information, see https://www.github.com/cleanlab/codex-python#with_streaming_response
- """
- return AsyncTlmResourceWithStreamingResponse(self)
-
- async def prompt(
- self,
- *,
- prompt: str,
- constrain_outputs: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- options: Optional[tlm_prompt_params.Options] | NotGiven = NOT_GIVEN,
- quality_preset: Literal["best", "high", "medium", "low", "base"] | NotGiven = NOT_GIVEN,
- task: Optional[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> TlmPromptResponse:
- """
- Prompts the TLM API.
-
- Args:
- options: Typed dict of advanced configuration options for the Trustworthy Language Model.
- Many of these configurations are determined by the quality preset selected
- (learn about quality presets in the TLM [initialization method](./#class-tlm)).
- Specifying TLMOptions values directly overrides any default values set from the
- quality preset.
-
- For all options described below, higher settings will lead to longer runtimes
- and may consume more tokens internally. You may not be able to run long prompts
- (or prompts with long responses) in your account, unless your token/rate limits
- are increased. If you hit token limit issues, try lower/less expensive
- TLMOptions to be able to run longer prompts/responses, or contact Cleanlab to
- increase your limits.
-
- The default values corresponding to each quality preset are:
-
- - **best:** `num_consistency_samples` = 8, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **high:** `num_consistency_samples` = 4, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **medium:** `num_consistency_samples` = 0, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **low:** `num_consistency_samples` = 0, `num_self_reflections` = 3,
- `reasoning_effort` = `"none"`.
- - **base:** `num_consistency_samples` = 0, `num_self_reflections` = 1,
- `reasoning_effort` = `"none"`.
-
- By default, TLM uses the: "medium" `quality_preset`, "gpt-4.1-mini" base
- `model`, and `max_tokens` is set to 512. You can set custom values for these
- arguments regardless of the quality preset specified.
-
- Args: model ({"gpt-5", "gpt-5-mini", "gpt-5-nano", "gpt-4.1", "gpt-4.1-mini",
- "gpt-4.1-nano", "o4-mini", "o3", "gpt-4.5-preview", "gpt-4o-mini", "gpt-4o",
- "o3-mini", "o1", "o1-mini", "gpt-4", "gpt-3.5-turbo-16k", "claude-opus-4-0",
- "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-sonnet-v2",
- "claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
- "nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
- (better models yield better results, faster models yield faster results). -
- Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
- "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
- "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
- "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
- for low latency/costs: "gpt-4.1-nano", "nova-micro".
-
- log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
- For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
-
- custom_eval_criteria (list[dict[str, Any]], default = []): optionally specify custom evaluation criteria beyond the built-in trustworthiness scoring.
- The expected input format is a list of dictionaries, where each dictionary has the following keys:
- - name: Name of the evaluation criteria.
- - criteria: Instructions specifying the evaluation criteria.
-
- max_tokens (int, default = 512): the maximum number of tokens that can be generated in the response from `TLM.prompt()` as well as during internal trustworthiness scoring.
- If you experience token/rate-limit errors, try lowering this number.
- For OpenAI models, this parameter must be between 64 and 4096. For Claude models, this parameter must be between 64 and 512.
-
- reasoning_effort ({"none", "low", "medium", "high"}, default = "high"): how much internal LLM calls are allowed to reason (number of thinking tokens)
- when generating alternative possible responses and reflecting on responses during trustworthiness scoring.
- Reduce this value to reduce runtimes. Higher values may improve trust scoring.
-
- num_self_reflections (int, default = 3): the number of different evaluations to perform where the LLM reflects on the response, a factor affecting trust scoring.
- The maximum number currently supported is 3. Lower values can reduce runtimes.
- Reflection helps quantify aleatoric uncertainty associated with challenging prompts and catches responses that are noticeably incorrect/bad upon further analysis.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- num_consistency_samples (int, default = 8): the amount of internal sampling to measure LLM response consistency, a factor affecting trust scoring.
- Must be between 0 and 20. Lower values can reduce runtimes.
- Measuring consistency helps quantify the epistemic uncertainty associated with
- strange prompts or prompts that are too vague/open-ended to receive a clearly defined 'good' response.
- TLM measures consistency via the degree of contradiction between sampled responses that the model considers plausible.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- similarity_measure ({"semantic", "string", "embedding", "embedding_large", "code", "discrepancy"}, default = "discrepancy"): how the
- trustworthiness scoring's consistency algorithm measures similarity between alternative responses considered plausible by the model.
- Supported similarity measures include - "semantic" (based on natural language inference),
- "embedding" (based on vector embedding similarity), "embedding_large" (based on a larger embedding model),
- "code" (based on model-based analysis designed to compare code), "discrepancy" (based on model-based analysis of possible discrepancies),
- and "string" (based on character/word overlap). Set this to "string" for minimal runtimes.
- This parameter has no effect when `num_consistency_samples = 0`.
-
- num_candidate_responses (int, default = 1): how many alternative candidate responses are internally generated in `TLM.prompt()`.
- `TLM.prompt()` scores the trustworthiness of each candidate response, and then returns the most trustworthy one.
- You can auto-improve responses by increasing this parameter, but at higher runtimes/costs.
- This parameter must be between 1 and 20. It has no effect on `TLM.score()`.
- When this parameter is 1, `TLM.prompt()` simply returns a standard LLM response and does not attempt to auto-improve it.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- disable_trustworthiness (bool, default = False): if True, trustworthiness scoring is disabled and TLM will not compute trust scores for responses.
- This is useful when you only want to use custom evaluation criteria or when you want to minimize computational overhead and only need the base LLM response.
- The following parameters will be ignored when `disable_trustworthiness` is True: `num_consistency_samples`, `num_self_reflections`, `num_candidate_responses`, `reasoning_effort`, `similarity_measure`.
-
- quality_preset: The quality preset to use for the TLM or Trustworthy RAG API.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/api/tlm/prompt",
- body=await async_maybe_transform(
- {
- "prompt": prompt,
- "constrain_outputs": constrain_outputs,
- "options": options,
- "quality_preset": quality_preset,
- "task": task,
- },
- tlm_prompt_params.TlmPromptParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=TlmPromptResponse,
- )
-
- async def score(
- self,
- *,
- prompt: str,
- response: str,
- constrain_outputs: Optional[SequenceNotStr[str]] | NotGiven = NOT_GIVEN,
- options: Optional[tlm_score_params.Options] | NotGiven = NOT_GIVEN,
- quality_preset: Literal["best", "high", "medium", "low", "base"] | NotGiven = NOT_GIVEN,
- task: Optional[str] | NotGiven = NOT_GIVEN,
- # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
- # The extra values given here take precedence over values defined on the client or passed to this method.
- extra_headers: Headers | None = None,
- extra_query: Query | None = None,
- extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> TlmScoreResponse:
- """
- Scores the TLM API.
-
- TODO:
-
- - Track query count in DB
- - Enforce hard cap on queries for users w/o credit card on file
-
- Args:
- options: Typed dict of advanced configuration options for the Trustworthy Language Model.
- Many of these configurations are determined by the quality preset selected
- (learn about quality presets in the TLM [initialization method](./#class-tlm)).
- Specifying TLMOptions values directly overrides any default values set from the
- quality preset.
-
- For all options described below, higher settings will lead to longer runtimes
- and may consume more tokens internally. You may not be able to run long prompts
- (or prompts with long responses) in your account, unless your token/rate limits
- are increased. If you hit token limit issues, try lower/less expensive
- TLMOptions to be able to run longer prompts/responses, or contact Cleanlab to
- increase your limits.
-
- The default values corresponding to each quality preset are:
-
- - **best:** `num_consistency_samples` = 8, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **high:** `num_consistency_samples` = 4, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **medium:** `num_consistency_samples` = 0, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **low:** `num_consistency_samples` = 0, `num_self_reflections` = 3,
- `reasoning_effort` = `"none"`.
- - **base:** `num_consistency_samples` = 0, `num_self_reflections` = 1,
- `reasoning_effort` = `"none"`.
-
- By default, TLM uses the: "medium" `quality_preset`, "gpt-4.1-mini" base
- `model`, and `max_tokens` is set to 512. You can set custom values for these
- arguments regardless of the quality preset specified.
-
- Args: model ({"gpt-5", "gpt-5-mini", "gpt-5-nano", "gpt-4.1", "gpt-4.1-mini",
- "gpt-4.1-nano", "o4-mini", "o3", "gpt-4.5-preview", "gpt-4o-mini", "gpt-4o",
- "o3-mini", "o1", "o1-mini", "gpt-4", "gpt-3.5-turbo-16k", "claude-opus-4-0",
- "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-sonnet-v2",
- "claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
- "nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
- (better models yield better results, faster models yield faster results). -
- Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
- "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
- "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
- "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
- for low latency/costs: "gpt-4.1-nano", "nova-micro".
-
- log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
- For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
-
- custom_eval_criteria (list[dict[str, Any]], default = []): optionally specify custom evaluation criteria beyond the built-in trustworthiness scoring.
- The expected input format is a list of dictionaries, where each dictionary has the following keys:
- - name: Name of the evaluation criteria.
- - criteria: Instructions specifying the evaluation criteria.
-
- max_tokens (int, default = 512): the maximum number of tokens that can be generated in the response from `TLM.prompt()` as well as during internal trustworthiness scoring.
- If you experience token/rate-limit errors, try lowering this number.
- For OpenAI models, this parameter must be between 64 and 4096. For Claude models, this parameter must be between 64 and 512.
-
- reasoning_effort ({"none", "low", "medium", "high"}, default = "high"): how much internal LLM calls are allowed to reason (number of thinking tokens)
- when generating alternative possible responses and reflecting on responses during trustworthiness scoring.
- Reduce this value to reduce runtimes. Higher values may improve trust scoring.
-
- num_self_reflections (int, default = 3): the number of different evaluations to perform where the LLM reflects on the response, a factor affecting trust scoring.
- The maximum number currently supported is 3. Lower values can reduce runtimes.
- Reflection helps quantify aleatoric uncertainty associated with challenging prompts and catches responses that are noticeably incorrect/bad upon further analysis.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- num_consistency_samples (int, default = 8): the amount of internal sampling to measure LLM response consistency, a factor affecting trust scoring.
- Must be between 0 and 20. Lower values can reduce runtimes.
- Measuring consistency helps quantify the epistemic uncertainty associated with
- strange prompts or prompts that are too vague/open-ended to receive a clearly defined 'good' response.
- TLM measures consistency via the degree of contradiction between sampled responses that the model considers plausible.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- similarity_measure ({"semantic", "string", "embedding", "embedding_large", "code", "discrepancy"}, default = "discrepancy"): how the
- trustworthiness scoring's consistency algorithm measures similarity between alternative responses considered plausible by the model.
- Supported similarity measures include - "semantic" (based on natural language inference),
- "embedding" (based on vector embedding similarity), "embedding_large" (based on a larger embedding model),
- "code" (based on model-based analysis designed to compare code), "discrepancy" (based on model-based analysis of possible discrepancies),
- and "string" (based on character/word overlap). Set this to "string" for minimal runtimes.
- This parameter has no effect when `num_consistency_samples = 0`.
-
- num_candidate_responses (int, default = 1): how many alternative candidate responses are internally generated in `TLM.prompt()`.
- `TLM.prompt()` scores the trustworthiness of each candidate response, and then returns the most trustworthy one.
- You can auto-improve responses by increasing this parameter, but at higher runtimes/costs.
- This parameter must be between 1 and 20. It has no effect on `TLM.score()`.
- When this parameter is 1, `TLM.prompt()` simply returns a standard LLM response and does not attempt to auto-improve it.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- disable_trustworthiness (bool, default = False): if True, trustworthiness scoring is disabled and TLM will not compute trust scores for responses.
- This is useful when you only want to use custom evaluation criteria or when you want to minimize computational overhead and only need the base LLM response.
- The following parameters will be ignored when `disable_trustworthiness` is True: `num_consistency_samples`, `num_self_reflections`, `num_candidate_responses`, `reasoning_effort`, `similarity_measure`.
-
- quality_preset: The quality preset to use for the TLM or Trustworthy RAG API.
-
- extra_headers: Send extra headers
-
- extra_query: Add additional query parameters to the request
-
- extra_body: Add additional JSON properties to the request
-
- timeout: Override the client-level default timeout for this request, in seconds
- """
- return await self._post(
- "/api/tlm/score",
- body=await async_maybe_transform(
- {
- "prompt": prompt,
- "response": response,
- "constrain_outputs": constrain_outputs,
- "options": options,
- "quality_preset": quality_preset,
- "task": task,
- },
- tlm_score_params.TlmScoreParams,
- ),
- options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
- ),
- cast_to=TlmScoreResponse,
- )
-
-
-class TlmResourceWithRawResponse:
- def __init__(self, tlm: TlmResource) -> None:
- self._tlm = tlm
-
- self.prompt = to_raw_response_wrapper(
- tlm.prompt,
- )
- self.score = to_raw_response_wrapper(
- tlm.score,
- )
-
-
-class AsyncTlmResourceWithRawResponse:
- def __init__(self, tlm: AsyncTlmResource) -> None:
- self._tlm = tlm
-
- self.prompt = async_to_raw_response_wrapper(
- tlm.prompt,
- )
- self.score = async_to_raw_response_wrapper(
- tlm.score,
- )
-
-
-class TlmResourceWithStreamingResponse:
- def __init__(self, tlm: TlmResource) -> None:
- self._tlm = tlm
-
- self.prompt = to_streamed_response_wrapper(
- tlm.prompt,
- )
- self.score = to_streamed_response_wrapper(
- tlm.score,
- )
-
-
-class AsyncTlmResourceWithStreamingResponse:
- def __init__(self, tlm: AsyncTlmResource) -> None:
- self._tlm = tlm
-
- self.prompt = async_to_streamed_response_wrapper(
- tlm.prompt,
- )
- self.score = async_to_streamed_response_wrapper(
- tlm.score,
- )
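Deleting this module removes the `Tlm` resource wholesale: `client.tlm.prompt(...)` and `client.tlm.score(...)`, along with `TlmPromptResponse`/`TlmScoreResponse`, are gone from the SDK as of this release. Code pinned to earlier alphas along the following lines will now raise `AttributeError` (the `Codex` client entry point is assumed here; the method signatures are taken from the removed file above):

```python
from codex import Codex

client = Codex()

# Worked through 0.1.0-alpha.26; fails after this release because the
# Tlm resource and its request/response types no longer exist.
response = client.tlm.prompt(
    prompt="What is the capital of France?",
    quality_preset="medium",
)
score = client.tlm.score(
    prompt="What is the capital of France?",
    response="Paris",
)
```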
diff --git a/src/codex/resources/users/myself/api_key.py b/src/codex/resources/users/myself/api_key.py
index 72f1502b..d8a20253 100644
--- a/src/codex/resources/users/myself/api_key.py
+++ b/src/codex/resources/users/myself/api_key.py
@@ -4,7 +4,7 @@
import httpx
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._types import Body, Query, Headers, NotGiven, not_given
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import (
@@ -48,7 +48,7 @@ def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> UserSchemaPublic:
"""Get user when authenticated with API key."""
return self._get(
@@ -67,7 +67,7 @@ def refresh(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> UserSchema:
"""Refresh the API key for an authenticated user"""
return self._post(
@@ -107,7 +107,7 @@ async def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> UserSchemaPublic:
"""Get user when authenticated with API key."""
return await self._get(
@@ -126,7 +126,7 @@ async def refresh(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> UserSchema:
"""Refresh the API key for an authenticated user"""
return await self._post(
diff --git a/src/codex/resources/users/myself/myself.py b/src/codex/resources/users/myself/myself.py
index 3ee27229..ece8b611 100644
--- a/src/codex/resources/users/myself/myself.py
+++ b/src/codex/resources/users/myself/myself.py
@@ -12,7 +12,7 @@
APIKeyResourceWithStreamingResponse,
AsyncAPIKeyResourceWithStreamingResponse,
)
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._types import Body, Query, Headers, NotGiven, not_given
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import (
@@ -71,7 +71,7 @@ def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> UserSchemaPublic:
"""Get user info for frontend."""
return self._get(
@@ -119,7 +119,7 @@ async def retrieve(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> UserSchemaPublic:
"""Get user info for frontend."""
return await self._get(
diff --git a/src/codex/resources/users/myself/organizations.py b/src/codex/resources/users/myself/organizations.py
index 2d5b7127..ca95590d 100644
--- a/src/codex/resources/users/myself/organizations.py
+++ b/src/codex/resources/users/myself/organizations.py
@@ -4,7 +4,7 @@
import httpx
-from ...._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ...._types import Body, Query, Headers, NotGiven, not_given
from ...._compat import cached_property
from ...._resource import SyncAPIResource, AsyncAPIResource
from ...._response import (
@@ -47,7 +47,7 @@ def list(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> UserOrganizationsSchema:
"""Get the organizations for an authenticated user"""
return self._get(
@@ -87,7 +87,7 @@ async def list(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> UserOrganizationsSchema:
"""Get the organizations for an authenticated user"""
return await self._get(
diff --git a/src/codex/resources/users/users.py b/src/codex/resources/users/users.py
index d207a96d..2d2dfaef 100644
--- a/src/codex/resources/users/users.py
+++ b/src/codex/resources/users/users.py
@@ -8,7 +8,7 @@
import httpx
from ...types import user_activate_account_params
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
@@ -73,17 +73,17 @@ def activate_account(
*,
first_name: str,
last_name: str,
- account_activated_at: Union[str, datetime] | NotGiven = NOT_GIVEN,
- discovery_source: Optional[str] | NotGiven = NOT_GIVEN,
- is_account_activated: bool | NotGiven = NOT_GIVEN,
- phone_number: Optional[str] | NotGiven = NOT_GIVEN,
- user_provided_company_name: Optional[str] | NotGiven = NOT_GIVEN,
+ account_activated_at: Union[str, datetime] | Omit = omit,
+ discovery_source: Optional[str] | Omit = omit,
+ is_account_activated: bool | Omit = omit,
+ phone_number: Optional[str] | Omit = omit,
+ user_provided_company_name: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> UserSchemaPublic:
"""
Activate an authenticated user's account
@@ -151,17 +151,17 @@ async def activate_account(
*,
first_name: str,
last_name: str,
- account_activated_at: Union[str, datetime] | NotGiven = NOT_GIVEN,
- discovery_source: Optional[str] | NotGiven = NOT_GIVEN,
- is_account_activated: bool | NotGiven = NOT_GIVEN,
- phone_number: Optional[str] | NotGiven = NOT_GIVEN,
- user_provided_company_name: Optional[str] | NotGiven = NOT_GIVEN,
+ account_activated_at: Union[str, datetime] | Omit = omit,
+ discovery_source: Optional[str] | Omit = omit,
+ is_account_activated: bool | Omit = omit,
+ phone_number: Optional[str] | Omit = omit,
+ user_provided_company_name: Optional[str] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> UserSchemaPublic:
"""
Activate an authenticated user's account
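The `Omit` migration is easiest to read against `activate_account`: only `first_name` and `last_name` are required, and every keyword left at its `omit` default is excluded from the request body rather than serialized as `null`. A sketch (client construction assumed as above):

```python
from codex import Codex

client = Codex()

user = client.users.activate_account(
    first_name="Ada",
    last_name="Lovelace",
    # phone_number, discovery_source, account_activated_at, etc. default to
    # `omit` and are left out of the request entirely, not sent as null.
)
```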
diff --git a/src/codex/resources/users/verification.py b/src/codex/resources/users/verification.py
index e75326e1..d4ef02d9 100644
--- a/src/codex/resources/users/verification.py
+++ b/src/codex/resources/users/verification.py
@@ -4,7 +4,7 @@
import httpx
-from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._types import Body, Query, Headers, NotGiven, not_given
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
@@ -47,7 +47,7 @@ def resend(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VerificationResendResponse:
"""Resend verification email to the specified user through Auth0."""
return self._post(
@@ -87,7 +87,7 @@ async def resend(
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
- timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> VerificationResendResponse:
"""Resend verification email to the specified user through Auth0."""
return await self._post(
diff --git a/src/codex/types/__init__.py b/src/codex/types/__init__.py
index daa16358..322b513b 100644
--- a/src/codex/types/__init__.py
+++ b/src/codex/types/__init__.py
@@ -2,11 +2,7 @@
from __future__ import annotations
-from .tlm_score_params import TlmScoreParams as TlmScoreParams
-from .tlm_prompt_params import TlmPromptParams as TlmPromptParams
-from .tlm_score_response import TlmScoreResponse as TlmScoreResponse
from .project_list_params import ProjectListParams as ProjectListParams
-from .tlm_prompt_response import TlmPromptResponse as TlmPromptResponse
from .health_check_response import HealthCheckResponse as HealthCheckResponse
from .project_create_params import ProjectCreateParams as ProjectCreateParams
from .project_list_response import ProjectListResponse as ProjectListResponse
diff --git a/src/codex/types/project_create_params.py b/src/codex/types/project_create_params.py
index c75023f0..0d4a4f57 100644
--- a/src/codex/types/project_create_params.py
+++ b/src/codex/types/project_create_params.py
@@ -333,6 +333,10 @@ class Config(TypedDict, total=False):
query_use_llm_matching: bool
+ question_match_llm_prompt: str
+
+ question_match_llm_prompt_with_answer: str
+
tlm_evals_model: str
upper_llm_match_distance_threshold: float
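The two new `question_match_llm_prompt*` fields let a project override the prompt used for LLM-based query matching, with and without a candidate answer available (judging by the names). Since `Config` is a `total=False` TypedDict, both stay optional. A sketch of setting them at creation time, assuming the config nests under the project's `config` key and that `create` takes `name`/`organization_id` at the top level:

```python
from codex import Codex

client = Codex()

project = client.projects.create(
    name="support-bot",          # illustrative values throughout
    organization_id="org_123",
    config={
        "query_use_llm_matching": True,
        "question_match_llm_prompt": "Do these two questions ask the same thing? ...",
        "question_match_llm_prompt_with_answer": "Given the candidate answer, do they match? ...",
    },
)
```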
diff --git a/src/codex/types/project_list_response.py b/src/codex/types/project_list_response.py
index 4ac38497..db1666cf 100644
--- a/src/codex/types/project_list_response.py
+++ b/src/codex/types/project_list_response.py
@@ -323,6 +323,10 @@ class ProjectConfig(BaseModel):
query_use_llm_matching: Optional[bool] = None
+ question_match_llm_prompt: Optional[str] = None
+
+ question_match_llm_prompt_with_answer: Optional[str] = None
+
tlm_evals_model: Optional[str] = None
upper_llm_match_distance_threshold: Optional[float] = None
diff --git a/src/codex/types/project_retrieve_response.py b/src/codex/types/project_retrieve_response.py
index 6e87d655..19abb67f 100644
--- a/src/codex/types/project_retrieve_response.py
+++ b/src/codex/types/project_retrieve_response.py
@@ -322,6 +322,10 @@ class Config(BaseModel):
query_use_llm_matching: Optional[bool] = None
+ question_match_llm_prompt: Optional[str] = None
+
+ question_match_llm_prompt_with_answer: Optional[str] = None
+
tlm_evals_model: Optional[str] = None
upper_llm_match_distance_threshold: Optional[float] = None
diff --git a/src/codex/types/project_return_schema.py b/src/codex/types/project_return_schema.py
index bb087cd0..cb114db9 100644
--- a/src/codex/types/project_return_schema.py
+++ b/src/codex/types/project_return_schema.py
@@ -322,6 +322,10 @@ class Config(BaseModel):
query_use_llm_matching: Optional[bool] = None
+ question_match_llm_prompt: Optional[str] = None
+
+ question_match_llm_prompt_with_answer: Optional[str] = None
+
tlm_evals_model: Optional[str] = None
upper_llm_match_distance_threshold: Optional[float] = None
diff --git a/src/codex/types/project_update_params.py b/src/codex/types/project_update_params.py
index c550b436..9b32bb6d 100644
--- a/src/codex/types/project_update_params.py
+++ b/src/codex/types/project_update_params.py
@@ -331,6 +331,10 @@ class Config(TypedDict, total=False):
query_use_llm_matching: bool
+ question_match_llm_prompt: str
+
+ question_match_llm_prompt_with_answer: str
+
tlm_evals_model: str
upper_llm_match_distance_threshold: float
diff --git a/src/codex/types/project_validate_params.py b/src/codex/types/project_validate_params.py
index 56e2ae97..0efa4309 100644
--- a/src/codex/types/project_validate_params.py
+++ b/src/codex/types/project_validate_params.py
@@ -180,15 +180,8 @@ class ProjectValidateParams(TypedDict, total=False):
When this parameter is 1, `TLM.prompt()` simply returns a standard LLM response and does not attempt to auto-improve it.
This parameter has no effect when `disable_trustworthiness` is True.
- disable_trustworthiness (bool, default = False): if True, trustworthiness scoring is disabled and TLM will not compute trust scores for responses.
- This is useful when you only want to use custom evaluation criteria or when you want to minimize computational overhead and only need the base LLM response.
- The following parameters will be ignored when `disable_trustworthiness` is True: `num_consistency_samples`, `num_self_reflections`, `num_candidate_responses`, `reasoning_effort`, `similarity_measure`.
- """
-
- prompt: Optional[str]
- """The prompt to use for the TLM call.
-
- If not provided, the prompt will be generated from the messages.
+ disable_trustworthiness (bool, default = False): if True, TLM will not compute trust scores,
+ useful if you only want to compute custom evaluation criteria.
"""
quality_preset: Literal["best", "high", "medium", "low", "base"]
diff --git a/src/codex/types/project_validate_response.py b/src/codex/types/project_validate_response.py
index 003b676c..e56d3bca 100644
--- a/src/codex/types/project_validate_response.py
+++ b/src/codex/types/project_validate_response.py
@@ -52,6 +52,9 @@ class ProjectValidateResponse(BaseModel):
Codex Project, or None otherwise.
"""
+ expert_review_guardrail_explanation: Optional[str] = None
+ """Explanation from a similar bad query log that caused this to be guardrailed"""
+
is_bad_response: bool
"""True if the response is flagged as potentially bad, False otherwise.
diff --git a/src/codex/types/projects/query_log_list_by_group_params.py b/src/codex/types/projects/query_log_list_by_group_params.py
index 7eb24103..17a260bb 100644
--- a/src/codex/types/projects/query_log_list_by_group_params.py
+++ b/src/codex/types/projects/query_log_list_by_group_params.py
@@ -22,6 +22,9 @@ class QueryLogListByGroupParams(TypedDict, total=False):
custom_metadata: Optional[str]
"""Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}"""
+ expert_review_status: Optional[Literal["good", "bad"]]
+ """Filter by expert review status"""
+
failed_evals: Optional[SequenceNotStr[str]]
"""Filter by evals that failed"""
@@ -51,7 +54,24 @@ class QueryLogListByGroupParams(TypedDict, total=False):
remediation_ids: SequenceNotStr[str]
"""List of groups to list child logs for"""
+ search_text: Optional[str]
+ """
+ Case-insensitive search across evaluated_response and question fields
+ (original_question if available, otherwise question)
+ """
+
sort: Optional[str]
+ """Field or score to sort by.
+
+ Available fields: 'created_at', 'primary_eval_issue_score'.
+
+ For eval scores, use '.eval.' prefix followed by the eval name.
+
+ Default eval scores: '.eval.trustworthiness', '.eval.context_sufficiency',
+ '.eval.response_helpfulness', '.eval.query_ease', '.eval.response_groundedness'.
+
+ Custom eval scores: '.eval.custom_eval_1', '.eval.custom_eval_2', etc.
+ """
tool_call_names: Optional[SequenceNotStr[str]]
"""Filter by names of tools called in the assistant response"""
diff --git a/src/codex/types/projects/query_log_list_by_group_response.py b/src/codex/types/projects/query_log_list_by_group_response.py
index fc33cdeb..f6156eb2 100644
--- a/src/codex/types/projects/query_log_list_by_group_response.py
+++ b/src/codex/types/projects/query_log_list_by_group_response.py
@@ -45,6 +45,7 @@
"QueryLogsByGroupQueryLogMessageChatCompletionDeveloperMessageParamContentUnionMember1",
"QueryLogsByGroupQueryLogTool",
"QueryLogsByGroupQueryLogToolFunction",
+ "Filters",
]
@@ -399,12 +400,27 @@ class QueryLogsByGroupQueryLog(BaseModel):
Used to log tool calls in the query log.
"""
+ expert_review_created_at: Optional[datetime] = None
+ """When the expert review was created"""
+
+ expert_review_created_by_user_id: Optional[str] = None
+ """ID of the user who created the expert review"""
+
+ expert_review_explanation: Optional[str] = None
+ """Expert explanation when marked as bad"""
+
+ expert_review_status: Optional[Literal["good", "bad"]] = None
+ """Expert review status: 'good' or 'bad'"""
+
guardrail_evals: Optional[List[str]] = None
"""Evals that should trigger guardrail"""
guardrailed: Optional[bool] = None
"""If true, the response was guardrailed"""
+ manual_review_status_override: Optional[Literal["addressed", "unaddressed"]] = None
+ """Manual review status override for remediations."""
+
messages: Optional[List[QueryLogsByGroupQueryLogMessage]] = None
"""Message history to provide conversation context for the query.
@@ -425,6 +441,9 @@ class QueryLogsByGroupQueryLog(BaseModel):
primary_eval_issue_score: Optional[float] = None
"""Score of the primary eval issue"""
+ similar_query_log_guardrail_explanation: Optional[str] = None
+ """Explanation from a similar bad query log that caused this to be guardrailed"""
+
tools: Optional[List[QueryLogsByGroupQueryLogTool]] = None
"""Tools to use for the LLM call.
@@ -438,11 +457,62 @@ class QueryLogsByGroup(BaseModel):
total_count: int
+class Filters(BaseModel):
+ custom_metadata_dict: Optional[object] = None
+
+ created_at_end: Optional[datetime] = None
+ """Filter logs created at or before this timestamp"""
+
+ created_at_start: Optional[datetime] = None
+ """Filter logs created at or after this timestamp"""
+
+ custom_metadata: Optional[str] = None
+ """Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}"""
+
+ expert_review_status: Optional[Literal["good", "bad"]] = None
+ """Filter by expert review status"""
+
+ failed_evals: Optional[List[str]] = None
+ """Filter by evals that failed"""
+
+ guardrailed: Optional[bool] = None
+ """Filter by guardrailed status"""
+
+ has_tool_calls: Optional[bool] = None
+ """Filter by whether the query log has tool calls"""
+
+ needs_review: Optional[bool] = None
+ """Filter logs that need review"""
+
+ passed_evals: Optional[List[str]] = None
+ """Filter by evals that passed"""
+
+ primary_eval_issue: Optional[
+ List[Literal["hallucination", "search_failure", "unhelpful", "difficult_query", "ungrounded"]]
+ ] = None
+ """Filter logs that have ANY of these primary evaluation issues (OR operation)"""
+
+ search_text: Optional[str] = None
+ """
+ Case-insensitive search across evaluated_response and question fields
+ (original_question if available, otherwise question)
+ """
+
+ tool_call_names: Optional[List[str]] = None
+ """Filter by names of tools called in the assistant response"""
+
+ was_cache_hit: Optional[bool] = None
+ """Filter by cache hit status"""
+
+
class QueryLogListByGroupResponse(BaseModel):
custom_metadata_columns: List[str]
"""Columns of the custom metadata"""
query_logs_by_group: Dict[str, QueryLogsByGroup]
+ filters: Optional[Filters] = None
+ """Applied filters for the query"""
+
tool_names: Optional[List[str]] = None
"""Names of the tools available in queries"""
diff --git a/src/codex/types/projects/query_log_list_groups_params.py b/src/codex/types/projects/query_log_list_groups_params.py
index abb6a54a..ece65b16 100644
--- a/src/codex/types/projects/query_log_list_groups_params.py
+++ b/src/codex/types/projects/query_log_list_groups_params.py
@@ -22,6 +22,9 @@ class QueryLogListGroupsParams(TypedDict, total=False):
custom_metadata: Optional[str]
"""Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}"""
+ expert_review_status: Optional[Literal["good", "bad"]]
+ """Filter by expert review status"""
+
failed_evals: Optional[SequenceNotStr[str]]
"""Filter by evals that failed"""
@@ -48,7 +51,25 @@ class QueryLogListGroupsParams(TypedDict, total=False):
]
"""Filter logs that have ANY of these primary evaluation issues (OR operation)"""
- sort: Optional[Literal["created_at", "primary_eval_issue_score", "total_count", "custom_rank", "impact_score"]]
+ search_text: Optional[str]
+ """
+ Case-insensitive search across evaluated_response and question fields
+ (original_question if available, otherwise question)
+ """
+
+ sort: Optional[str]
+ """Field or score to sort by.
+
+ Available fields: 'created_at', 'custom_rank', 'impact_score',
+ 'primary_eval_issue_score', 'total_count'.
+
+ For eval scores, use '.eval.' prefix followed by the eval name.
+
+ Default eval scores: '.eval.trustworthiness', '.eval.context_sufficiency',
+ '.eval.response_helpfulness', '.eval.query_ease', '.eval.response_groundedness'.
+
+ Custom eval scores: '.eval.custom_eval_1', '.eval.custom_eval_2', etc.
+ """
tool_call_names: Optional[SequenceNotStr[str]]
"""Filter by names of tools called in the assistant response"""
diff --git a/src/codex/types/projects/query_log_list_groups_response.py b/src/codex/types/projects/query_log_list_groups_response.py
index 7dbf1929..11c24b21 100644
--- a/src/codex/types/projects/query_log_list_groups_response.py
+++ b/src/codex/types/projects/query_log_list_groups_response.py
@@ -395,12 +395,27 @@ class QueryLogListGroupsResponse(BaseModel):
Used to log tool calls in the query log.
"""
+ expert_review_created_at: Optional[datetime] = None
+ """When the expert review was created"""
+
+ expert_review_created_by_user_id: Optional[str] = None
+ """ID of the user who created the expert review"""
+
+ expert_review_explanation: Optional[str] = None
+ """Expert explanation when marked as bad"""
+
+ expert_review_status: Optional[Literal["good", "bad"]] = None
+ """Expert review status: 'good' or 'bad'"""
+
guardrail_evals: Optional[List[str]] = None
"""Evals that should trigger guardrail"""
guardrailed: Optional[bool] = None
"""If true, the response was guardrailed"""
+ manual_review_status_override: Optional[Literal["addressed", "unaddressed"]] = None
+ """Manual review status override for remediations."""
+
messages: Optional[List[Message]] = None
"""Message history to provide conversation context for the query.
@@ -421,6 +436,9 @@ class QueryLogListGroupsResponse(BaseModel):
primary_eval_issue_score: Optional[float] = None
"""Score of the primary eval issue"""
+ similar_query_log_guardrail_explanation: Optional[str] = None
+ """Explanation from a similar bad query log that caused this to be guardrailed"""
+
tools: Optional[List[Tool]] = None
"""Tools to use for the LLM call.
diff --git a/src/codex/types/projects/query_log_list_params.py b/src/codex/types/projects/query_log_list_params.py
index f0037632..eb7858a6 100644
--- a/src/codex/types/projects/query_log_list_params.py
+++ b/src/codex/types/projects/query_log_list_params.py
@@ -22,6 +22,9 @@ class QueryLogListParams(TypedDict, total=False):
custom_metadata: Optional[str]
"""Filter by custom metadata as JSON string: {"key1": "value1", "key2": "value2"}"""
+ expert_review_status: Optional[Literal["good", "bad"]]
+ """Filter by expert review status"""
+
failed_evals: Optional[SequenceNotStr[str]]
"""Filter by evals that failed"""
@@ -45,7 +48,24 @@ class QueryLogListParams(TypedDict, total=False):
]
"""Filter logs that have ANY of these primary evaluation issues (OR operation)"""
+ search_text: Optional[str]
+ """
+ Case-insensitive search across evaluated_response and question fields
+ (original_question if available, otherwise question)
+ """
+
sort: Optional[str]
+ """Field or score to sort by.
+
+ Available fields: 'created_at', 'primary_eval_issue_score'.
+
+ For eval scores, use '.eval.' prefix followed by the eval name.
+
+ Default eval scores: '.eval.trustworthiness', '.eval.context_sufficiency',
+ '.eval.response_helpfulness', '.eval.query_ease', '.eval.response_groundedness'.
+
+ Custom eval scores: '.eval.custom_eval_1', '.eval.custom_eval_2', etc.
+ """
tool_call_names: Optional[SequenceNotStr[str]]
"""Filter by names of tools called in the assistant response"""
diff --git a/src/codex/types/projects/query_log_list_response.py b/src/codex/types/projects/query_log_list_response.py
index b56d43d3..098ac817 100644
--- a/src/codex/types/projects/query_log_list_response.py
+++ b/src/codex/types/projects/query_log_list_response.py
@@ -383,6 +383,18 @@ class QueryLogListResponse(BaseModel):
Used to log tool calls in the query log.
"""
+ expert_review_created_at: Optional[datetime] = None
+ """When the expert review was created"""
+
+ expert_review_created_by_user_id: Optional[str] = None
+ """ID of the user who created the expert review"""
+
+ expert_review_explanation: Optional[str] = None
+ """Expert explanation when marked as bad"""
+
+ expert_review_status: Optional[Literal["good", "bad"]] = None
+ """Expert review status: 'good' or 'bad'"""
+
guardrail_evals: Optional[List[str]] = None
"""Evals that should trigger guardrail"""
@@ -409,6 +421,9 @@ class QueryLogListResponse(BaseModel):
primary_eval_issue_score: Optional[float] = None
"""Score of the primary eval issue"""
+ similar_query_log_guardrail_explanation: Optional[str] = None
+ """Explanation from a similar bad query log that caused this to be guardrailed"""
+
tools: Optional[List[Tool]] = None
"""Tools to use for the LLM call.
diff --git a/src/codex/types/projects/query_log_retrieve_response.py b/src/codex/types/projects/query_log_retrieve_response.py
index b9be8d6d..cae996c6 100644
--- a/src/codex/types/projects/query_log_retrieve_response.py
+++ b/src/codex/types/projects/query_log_retrieve_response.py
@@ -387,12 +387,27 @@ class QueryLogRetrieveResponse(BaseModel):
Used to log tool calls in the query log.
"""
+ expert_review_created_at: Optional[datetime] = None
+ """When the expert review was created"""
+
+ expert_review_created_by_user_id: Optional[str] = None
+ """ID of the user who created the expert review"""
+
+ expert_review_explanation: Optional[str] = None
+ """Expert explanation when marked as bad"""
+
+ expert_review_status: Optional[Literal["good", "bad"]] = None
+ """Expert review status: 'good' or 'bad'"""
+
guardrail_evals: Optional[List[str]] = None
"""Evals that should trigger guardrail"""
guardrailed: Optional[bool] = None
"""If true, the response was guardrailed"""
+ manual_review_status_override: Optional[Literal["addressed", "unaddressed"]] = None
+ """Manual review status override for remediations."""
+
messages: Optional[List[Message]] = None
"""Message history to provide conversation context for the query.
@@ -413,6 +428,9 @@ class QueryLogRetrieveResponse(BaseModel):
primary_eval_issue_score: Optional[float] = None
"""Score of the primary eval issue"""
+ similar_query_log_guardrail_explanation: Optional[str] = None
+ """Explanation from a similar bad query log that caused this to be guardrailed"""
+
tools: Optional[List[Tool]] = None
"""Tools to use for the LLM call.
diff --git a/src/codex/types/projects/query_log_start_remediation_response.py b/src/codex/types/projects/query_log_start_remediation_response.py
index ee7f0c72..73029a67 100644
--- a/src/codex/types/projects/query_log_start_remediation_response.py
+++ b/src/codex/types/projects/query_log_start_remediation_response.py
@@ -33,3 +33,6 @@ class QueryLogStartRemediationResponse(BaseModel):
answer: Optional[str] = None
draft_answer: Optional[str] = None
+
+ manual_review_status_override: Optional[Literal["addressed", "unaddressed"]] = None
+ """Manual review status override for remediations."""
diff --git a/src/codex/types/projects/remediation_create_response.py b/src/codex/types/projects/remediation_create_response.py
index 9b8a8775..560a4814 100644
--- a/src/codex/types/projects/remediation_create_response.py
+++ b/src/codex/types/projects/remediation_create_response.py
@@ -33,3 +33,6 @@ class RemediationCreateResponse(BaseModel):
answer: Optional[str] = None
draft_answer: Optional[str] = None
+
+ manual_review_status_override: Optional[Literal["addressed", "unaddressed"]] = None
+ """Manual review status override for remediations."""
diff --git a/src/codex/types/projects/remediation_edit_answer_response.py b/src/codex/types/projects/remediation_edit_answer_response.py
index 1d43c082..576d5065 100644
--- a/src/codex/types/projects/remediation_edit_answer_response.py
+++ b/src/codex/types/projects/remediation_edit_answer_response.py
@@ -33,3 +33,6 @@ class RemediationEditAnswerResponse(BaseModel):
answer: Optional[str] = None
draft_answer: Optional[str] = None
+
+ manual_review_status_override: Optional[Literal["addressed", "unaddressed"]] = None
+ """Manual review status override for remediations."""
diff --git a/src/codex/types/projects/remediation_edit_draft_answer_response.py b/src/codex/types/projects/remediation_edit_draft_answer_response.py
index 80f80c07..bec3b4d6 100644
--- a/src/codex/types/projects/remediation_edit_draft_answer_response.py
+++ b/src/codex/types/projects/remediation_edit_draft_answer_response.py
@@ -33,3 +33,6 @@ class RemediationEditDraftAnswerResponse(BaseModel):
answer: Optional[str] = None
draft_answer: Optional[str] = None
+
+ manual_review_status_override: Optional[Literal["addressed", "unaddressed"]] = None
+ """Manual review status override for remediations."""
diff --git a/src/codex/types/projects/remediation_get_resolved_logs_count_response.py b/src/codex/types/projects/remediation_get_resolved_logs_count_response.py
index 9222eb9a..3c7742c2 100644
--- a/src/codex/types/projects/remediation_get_resolved_logs_count_response.py
+++ b/src/codex/types/projects/remediation_get_resolved_logs_count_response.py
@@ -35,3 +35,6 @@ class RemediationGetResolvedLogsCountResponse(BaseModel):
answer: Optional[str] = None
draft_answer: Optional[str] = None
+
+ manual_review_status_override: Optional[Literal["addressed", "unaddressed"]] = None
+ """Manual review status override for remediations."""
diff --git a/src/codex/types/projects/remediation_list_resolved_logs_response.py b/src/codex/types/projects/remediation_list_resolved_logs_response.py
index ed764766..1ef0e6f0 100644
--- a/src/codex/types/projects/remediation_list_resolved_logs_response.py
+++ b/src/codex/types/projects/remediation_list_resolved_logs_response.py
@@ -390,6 +390,18 @@ class QueryLog(BaseModel):
Used to log tool calls in the query log.
"""
+ expert_review_created_at: Optional[datetime] = None
+ """When the expert review was created"""
+
+ expert_review_created_by_user_id: Optional[str] = None
+ """ID of the user who created the expert review"""
+
+ expert_review_explanation: Optional[str] = None
+ """Expert explanation when marked as bad"""
+
+ expert_review_status: Optional[Literal["good", "bad"]] = None
+ """Expert review status: 'good' or 'bad'"""
+
guardrail_evals: Optional[List[str]] = None
"""Evals that should trigger guardrail"""
@@ -416,6 +428,9 @@ class QueryLog(BaseModel):
primary_eval_issue_score: Optional[float] = None
"""Score of the primary eval issue"""
+ similar_query_log_guardrail_explanation: Optional[str] = None
+ """Explanation from a similar bad query log that caused this to be guardrailed"""
+
tools: Optional[List[QueryLogTool]] = None
"""Tools to use for the LLM call.
diff --git a/src/codex/types/projects/remediation_list_response.py b/src/codex/types/projects/remediation_list_response.py
index 83410539..300bacff 100644
--- a/src/codex/types/projects/remediation_list_response.py
+++ b/src/codex/types/projects/remediation_list_response.py
@@ -35,3 +35,6 @@ class RemediationListResponse(BaseModel):
answer: Optional[str] = None
draft_answer: Optional[str] = None
+
+ manual_review_status_override: Optional[Literal["addressed", "unaddressed"]] = None
+ """Manual review status override for remediations."""
diff --git a/src/codex/types/projects/remediation_pause_response.py b/src/codex/types/projects/remediation_pause_response.py
index 97e1ac58..3919112f 100644
--- a/src/codex/types/projects/remediation_pause_response.py
+++ b/src/codex/types/projects/remediation_pause_response.py
@@ -33,3 +33,6 @@ class RemediationPauseResponse(BaseModel):
answer: Optional[str] = None
draft_answer: Optional[str] = None
+
+ manual_review_status_override: Optional[Literal["addressed", "unaddressed"]] = None
+ """Manual review status override for remediations."""
diff --git a/src/codex/types/projects/remediation_publish_response.py b/src/codex/types/projects/remediation_publish_response.py
index b43c8b98..a76eefc9 100644
--- a/src/codex/types/projects/remediation_publish_response.py
+++ b/src/codex/types/projects/remediation_publish_response.py
@@ -33,3 +33,6 @@ class RemediationPublishResponse(BaseModel):
answer: Optional[str] = None
draft_answer: Optional[str] = None
+
+ manual_review_status_override: Optional[Literal["addressed", "unaddressed"]] = None
+ """Manual review status override for remediations."""
diff --git a/src/codex/types/projects/remediation_retrieve_response.py b/src/codex/types/projects/remediation_retrieve_response.py
index 69a327e0..43f269d8 100644
--- a/src/codex/types/projects/remediation_retrieve_response.py
+++ b/src/codex/types/projects/remediation_retrieve_response.py
@@ -33,3 +33,6 @@ class RemediationRetrieveResponse(BaseModel):
answer: Optional[str] = None
draft_answer: Optional[str] = None
+
+ manual_review_status_override: Optional[Literal["addressed", "unaddressed"]] = None
+ """Manual review status override for remediations."""
diff --git a/src/codex/types/projects/remediation_unpause_response.py b/src/codex/types/projects/remediation_unpause_response.py
index c3ce44fd..e8731c33 100644
--- a/src/codex/types/projects/remediation_unpause_response.py
+++ b/src/codex/types/projects/remediation_unpause_response.py
@@ -33,3 +33,6 @@ class RemediationUnpauseResponse(BaseModel):
answer: Optional[str] = None
draft_answer: Optional[str] = None
+
+ manual_review_status_override: Optional[Literal["addressed", "unaddressed"]] = None
+ """Manual review status override for remediations."""
diff --git a/src/codex/types/tlm_prompt_params.py b/src/codex/types/tlm_prompt_params.py
deleted file mode 100644
index 6a2a9da3..00000000
--- a/src/codex/types/tlm_prompt_params.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-from .._types import SequenceNotStr
-
-__all__ = ["TlmPromptParams", "Options"]
-
-
-class TlmPromptParams(TypedDict, total=False):
- prompt: Required[str]
-
- constrain_outputs: Optional[SequenceNotStr[str]]
-
- options: Optional[Options]
- """
- Typed dict of advanced configuration options for the Trustworthy Language Model.
- Many of these configurations are determined by the quality preset selected
- (learn about quality presets in the TLM [initialization method](./#class-tlm)).
- Specifying TLMOptions values directly overrides any default values set from the
- quality preset.
-
- For all options described below, higher settings will lead to longer runtimes
- and may consume more tokens internally. You may not be able to run long prompts
- (or prompts with long responses) in your account, unless your token/rate limits
- are increased. If you hit token limit issues, try lower/less expensive
- TLMOptions to be able to run longer prompts/responses, or contact Cleanlab to
- increase your limits.
-
- The default values corresponding to each quality preset are:
-
- - **best:** `num_consistency_samples` = 8, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **high:** `num_consistency_samples` = 4, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **medium:** `num_consistency_samples` = 0, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **low:** `num_consistency_samples` = 0, `num_self_reflections` = 3,
- `reasoning_effort` = `"none"`.
- - **base:** `num_consistency_samples` = 0, `num_self_reflections` = 1,
- `reasoning_effort` = `"none"`.
-
- By default, TLM uses the: "medium" `quality_preset`, "gpt-4.1-mini" base
- `model`, and `max_tokens` is set to 512. You can set custom values for these
- arguments regardless of the quality preset specified.
-
- Args: model ({"gpt-5", "gpt-5-mini", "gpt-5-nano", "gpt-4.1", "gpt-4.1-mini",
- "gpt-4.1-nano", "o4-mini", "o3", "gpt-4.5-preview", "gpt-4o-mini", "gpt-4o",
- "o3-mini", "o1", "o1-mini", "gpt-4", "gpt-3.5-turbo-16k", "claude-opus-4-0",
- "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-sonnet-v2",
- "claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
- "nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
- (better models yield better results, faster models yield faster results). -
- Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
- "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
- "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
- "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
- for low latency/costs: "gpt-4.1-nano", "nova-micro".
-
- log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
- For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
-
- custom_eval_criteria (list[dict[str, Any]], default = []): optionally specify custom evaluation criteria beyond the built-in trustworthiness scoring.
- The expected input format is a list of dictionaries, where each dictionary has the following keys:
- - name: Name of the evaluation criteria.
- - criteria: Instructions specifying the evaluation criteria.
-
- max_tokens (int, default = 512): the maximum number of tokens that can be generated in the response from `TLM.prompt()` as well as during internal trustworthiness scoring.
- If you experience token/rate-limit errors, try lowering this number.
- For OpenAI models, this parameter must be between 64 and 4096. For Claude models, this parameter must be between 64 and 512.
-
- reasoning_effort ({"none", "low", "medium", "high"}, default = "high"): how much internal LLM calls are allowed to reason (number of thinking tokens)
- when generating alternative possible responses and reflecting on responses during trustworthiness scoring.
- Reduce this value to reduce runtimes. Higher values may improve trust scoring.
-
- num_self_reflections (int, default = 3): the number of different evaluations to perform where the LLM reflects on the response, a factor affecting trust scoring.
- The maximum number currently supported is 3. Lower values can reduce runtimes.
- Reflection helps quantify aleatoric uncertainty associated with challenging prompts and catches responses that are noticeably incorrect/bad upon further analysis.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- num_consistency_samples (int, default = 8): the amount of internal sampling to measure LLM response consistency, a factor affecting trust scoring.
- Must be between 0 and 20. Lower values can reduce runtimes.
- Measuring consistency helps quantify the epistemic uncertainty associated with
- strange prompts or prompts that are too vague/open-ended to receive a clearly defined 'good' response.
- TLM measures consistency via the degree of contradiction between sampled responses that the model considers plausible.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- similarity_measure ({"semantic", "string", "embedding", "embedding_large", "code", "discrepancy"}, default = "discrepancy"): how the
- trustworthiness scoring's consistency algorithm measures similarity between alternative responses considered plausible by the model.
- Supported similarity measures include - "semantic" (based on natural language inference),
- "embedding" (based on vector embedding similarity), "embedding_large" (based on a larger embedding model),
- "code" (based on model-based analysis designed to compare code), "discrepancy" (based on model-based analysis of possible discrepancies),
- and "string" (based on character/word overlap). Set this to "string" for minimal runtimes.
- This parameter has no effect when `num_consistency_samples = 0`.
-
- num_candidate_responses (int, default = 1): how many alternative candidate responses are internally generated in `TLM.prompt()`.
- `TLM.prompt()` scores the trustworthiness of each candidate response, and then returns the most trustworthy one.
- You can auto-improve responses by increasing this parameter, but at higher runtimes/costs.
- This parameter must be between 1 and 20. It has no effect on `TLM.score()`.
- When this parameter is 1, `TLM.prompt()` simply returns a standard LLM response and does not attempt to auto-improve it.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- disable_trustworthiness (bool, default = False): if True, trustworthiness scoring is disabled and TLM will not compute trust scores for responses.
- This is useful when you only want to use custom evaluation criteria or when you want to minimize computational overhead and only need the base LLM response.
- The following parameters will be ignored when `disable_trustworthiness` is True: `num_consistency_samples`, `num_self_reflections`, `num_candidate_responses`, `reasoning_effort`, `similarity_measure`.
- """
-
- quality_preset: Literal["best", "high", "medium", "low", "base"]
- """The quality preset to use for the TLM or Trustworthy RAG API."""
-
- task: Optional[str]
-
-
-class Options(TypedDict, total=False):
- custom_eval_criteria: Iterable[object]
-
- disable_persistence: bool
-
- disable_trustworthiness: bool
-
- log: SequenceNotStr[str]
-
- max_tokens: int
-
- model: str
-
- num_candidate_responses: int
-
- num_consistency_samples: int
-
- num_self_reflections: int
-
- reasoning_effort: str
-
- similarity_measure: str
-
- use_self_reflection: bool
diff --git a/src/codex/types/tlm_prompt_response.py b/src/codex/types/tlm_prompt_response.py
deleted file mode 100644
index d939c00e..00000000
--- a/src/codex/types/tlm_prompt_response.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from .._models import BaseModel
-
-__all__ = ["TlmPromptResponse"]
-
-
-class TlmPromptResponse(BaseModel):
- response: str
-
- trustworthiness_score: float
-
- log: Optional[object] = None
diff --git a/src/codex/types/tlm_score_params.py b/src/codex/types/tlm_score_params.py
deleted file mode 100644
index cef4f490..00000000
--- a/src/codex/types/tlm_score_params.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-from typing import Iterable, Optional
-from typing_extensions import Literal, Required, TypedDict
-
-from .._types import SequenceNotStr
-
-__all__ = ["TlmScoreParams", "Options"]
-
-
-class TlmScoreParams(TypedDict, total=False):
- prompt: Required[str]
-
- response: Required[str]
-
- constrain_outputs: Optional[SequenceNotStr[str]]
-
- options: Optional[Options]
- """
- Typed dict of advanced configuration options for the Trustworthy Language Model.
- Many of these configurations are determined by the quality preset selected
- (learn about quality presets in the TLM [initialization method](./#class-tlm)).
- Specifying TLMOptions values directly overrides any default values set from the
- quality preset.
-
- For all options described below, higher settings will lead to longer runtimes
- and may consume more tokens internally. You may not be able to run long prompts
- (or prompts with long responses) in your account, unless your token/rate limits
- are increased. If you hit token limit issues, try lower/less expensive
- TLMOptions to be able to run longer prompts/responses, or contact Cleanlab to
- increase your limits.
-
- The default values corresponding to each quality preset are:
-
- - **best:** `num_consistency_samples` = 8, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **high:** `num_consistency_samples` = 4, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **medium:** `num_consistency_samples` = 0, `num_self_reflections` = 3,
- `reasoning_effort` = `"high"`.
- - **low:** `num_consistency_samples` = 0, `num_self_reflections` = 3,
- `reasoning_effort` = `"none"`.
- - **base:** `num_consistency_samples` = 0, `num_self_reflections` = 1,
- `reasoning_effort` = `"none"`.
-
- By default, TLM uses the: "medium" `quality_preset`, "gpt-4.1-mini" base
- `model`, and `max_tokens` is set to 512. You can set custom values for these
- arguments regardless of the quality preset specified.
-
- Args: model ({"gpt-5", "gpt-5-mini", "gpt-5-nano", "gpt-4.1", "gpt-4.1-mini",
- "gpt-4.1-nano", "o4-mini", "o3", "gpt-4.5-preview", "gpt-4o-mini", "gpt-4o",
- "o3-mini", "o1", "o1-mini", "gpt-4", "gpt-3.5-turbo-16k", "claude-opus-4-0",
- "claude-sonnet-4-0", "claude-3.7-sonnet", "claude-3.5-sonnet-v2",
- "claude-3.5-sonnet", "claude-3.5-haiku", "claude-3-haiku", "nova-micro",
- "nova-lite", "nova-pro"}, default = "gpt-4.1-mini"): Underlying base LLM to use
- (better models yield better results, faster models yield faster results). -
- Models still in beta: "o3", "o1", "o4-mini", "o3-mini", "o1-mini",
- "gpt-4.5-preview", "claude-opus-4-0", "claude-sonnet-4-0", "claude-3.7-sonnet",
- "claude-3.5-haiku". - Recommended models for accuracy: "gpt-5", "gpt-4.1",
- "o4-mini", "o3", "claude-opus-4-0", "claude-sonnet-4-0". - Recommended models
- for low latency/costs: "gpt-4.1-nano", "nova-micro".
-
- log (list[str], default = []): optionally specify additional logs or metadata that TLM should return.
- For instance, include "explanation" here to get explanations of why a response is scored with low trustworthiness.
-
- custom_eval_criteria (list[dict[str, Any]], default = []): optionally specify custom evaluation criteria beyond the built-in trustworthiness scoring.
- The expected input format is a list of dictionaries, where each dictionary has the following keys:
- - name: Name of the evaluation criteria.
- - criteria: Instructions specifying the evaluation criteria.
-
- max_tokens (int, default = 512): the maximum number of tokens that can be generated in the response from `TLM.prompt()` as well as during internal trustworthiness scoring.
- If you experience token/rate-limit errors, try lowering this number.
- For OpenAI models, this parameter must be between 64 and 4096. For Claude models, this parameter must be between 64 and 512.
-
- reasoning_effort ({"none", "low", "medium", "high"}, default = "high"): how much internal LLM calls are allowed to reason (number of thinking tokens)
- when generating alternative possible responses and reflecting on responses during trustworthiness scoring.
- Reduce this value to reduce runtimes. Higher values may improve trust scoring.
-
- num_self_reflections (int, default = 3): the number of different evaluations to perform where the LLM reflects on the response, a factor affecting trust scoring.
- The maximum number currently supported is 3. Lower values can reduce runtimes.
- Reflection helps quantify aleatoric uncertainty associated with challenging prompts and catches responses that are noticeably incorrect/bad upon further analysis.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- num_consistency_samples (int, default = 8): the amount of internal sampling to measure LLM response consistency, a factor affecting trust scoring.
- Must be between 0 and 20. Lower values can reduce runtimes.
- Measuring consistency helps quantify the epistemic uncertainty associated with
- strange prompts or prompts that are too vague/open-ended to receive a clearly defined 'good' response.
- TLM measures consistency via the degree of contradiction between sampled responses that the model considers plausible.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- similarity_measure ({"semantic", "string", "embedding", "embedding_large", "code", "discrepancy"}, default = "discrepancy"): how the
- trustworthiness scoring's consistency algorithm measures similarity between alternative responses considered plausible by the model.
- Supported similarity measures include - "semantic" (based on natural language inference),
- "embedding" (based on vector embedding similarity), "embedding_large" (based on a larger embedding model),
- "code" (based on model-based analysis designed to compare code), "discrepancy" (based on model-based analysis of possible discrepancies),
- and "string" (based on character/word overlap). Set this to "string" for minimal runtimes.
- This parameter has no effect when `num_consistency_samples = 0`.
-
- num_candidate_responses (int, default = 1): how many alternative candidate responses are internally generated in `TLM.prompt()`.
- `TLM.prompt()` scores the trustworthiness of each candidate response, and then returns the most trustworthy one.
- You can auto-improve responses by increasing this parameter, but at higher runtimes/costs.
- This parameter must be between 1 and 20. It has no effect on `TLM.score()`.
- When this parameter is 1, `TLM.prompt()` simply returns a standard LLM response and does not attempt to auto-improve it.
- This parameter has no effect when `disable_trustworthiness` is True.
-
- disable_trustworthiness (bool, default = False): if True, trustworthiness scoring is disabled and TLM will not compute trust scores for responses.
- This is useful when you only want to use custom evaluation criteria or when you want to minimize computational overhead and only need the base LLM response.
- The following parameters will be ignored when `disable_trustworthiness` is True: `num_consistency_samples`, `num_self_reflections`, `num_candidate_responses`, `reasoning_effort`, `similarity_measure`.
- """
-
- quality_preset: Literal["best", "high", "medium", "low", "base"]
- """The quality preset to use for the TLM or Trustworthy RAG API."""
-
- task: Optional[str]
-
-
-class Options(TypedDict, total=False):
- custom_eval_criteria: Iterable[object]
-
- disable_persistence: bool
-
- disable_trustworthiness: bool
-
- log: SequenceNotStr[str]
-
- max_tokens: int
-
- model: str
-
- num_candidate_responses: int
-
- num_consistency_samples: int
-
- num_self_reflections: int
-
- reasoning_effort: str
-
- similarity_measure: str
-
- use_self_reflection: bool
diff --git a/src/codex/types/tlm_score_response.py b/src/codex/types/tlm_score_response.py
deleted file mode 100644
index e92b2e09..00000000
--- a/src/codex/types/tlm_score_response.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from typing import Optional
-
-from .._models import BaseModel
-
-__all__ = ["TlmScoreResponse"]
-
-
-class TlmScoreResponse(BaseModel):
- trustworthiness_score: float
-
- log: Optional[object] = None
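With all four TLM request/response type modules deleted here (and their tests removed below), any downstream import of these symbols now fails at import time. A defensive sketch for migration code only; this diff does not name a replacement API:

```python
# Guard against the removed TLM types while callers are migrated off them.
try:
    from codex.types import TlmPromptResponse, TlmScoreResponse  # removed in this release
except ImportError:
    TlmPromptResponse = TlmScoreResponse = None  # signal callers to migrate
```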
diff --git a/tests/api_resources/projects/test_query_logs.py b/tests/api_resources/projects/test_query_logs.py
index 5f9d1e92..51262fe0 100644
--- a/tests/api_resources/projects/test_query_logs.py
+++ b/tests/api_resources/projects/test_query_logs.py
@@ -100,6 +100,7 @@ def test_method_list_with_all_params(self, client: Codex) -> None:
created_at_end=parse_datetime("2019-12-27T18:11:19.117Z"),
created_at_start=parse_datetime("2019-12-27T18:11:19.117Z"),
custom_metadata="custom_metadata",
+ expert_review_status="good",
failed_evals=["string"],
guardrailed=True,
has_tool_calls=True,
@@ -108,7 +109,8 @@ def test_method_list_with_all_params(self, client: Codex) -> None:
order="asc",
passed_evals=["string"],
primary_eval_issue=["hallucination"],
- sort="sort",
+ search_text="search_text",
+ sort="created_at",
tool_call_names=["string"],
was_cache_hit=True,
)
@@ -221,6 +223,7 @@ def test_method_list_by_group_with_all_params(self, client: Codex) -> None:
created_at_end=parse_datetime("2019-12-27T18:11:19.117Z"),
created_at_start=parse_datetime("2019-12-27T18:11:19.117Z"),
custom_metadata="custom_metadata",
+ expert_review_status="good",
failed_evals=["string"],
guardrailed=True,
has_tool_calls=True,
@@ -231,7 +234,8 @@ def test_method_list_by_group_with_all_params(self, client: Codex) -> None:
passed_evals=["string"],
primary_eval_issue=["hallucination"],
remediation_ids=["182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"],
- sort="sort",
+ search_text="search_text",
+ sort="created_at",
tool_call_names=["string"],
was_cache_hit=True,
)
@@ -287,6 +291,7 @@ def test_method_list_groups_with_all_params(self, client: Codex) -> None:
created_at_end=parse_datetime("2019-12-27T18:11:19.117Z"),
created_at_start=parse_datetime("2019-12-27T18:11:19.117Z"),
custom_metadata="custom_metadata",
+ expert_review_status="good",
failed_evals=["string"],
guardrailed=True,
has_tool_calls=True,
@@ -296,6 +301,7 @@ def test_method_list_groups_with_all_params(self, client: Codex) -> None:
order="asc",
passed_evals=["string"],
primary_eval_issue=["hallucination"],
+ search_text="search_text",
sort="created_at",
tool_call_names=["string"],
was_cache_hit=True,
@@ -519,6 +525,7 @@ async def test_method_list_with_all_params(self, async_client: AsyncCodex) -> No
created_at_end=parse_datetime("2019-12-27T18:11:19.117Z"),
created_at_start=parse_datetime("2019-12-27T18:11:19.117Z"),
custom_metadata="custom_metadata",
+ expert_review_status="good",
failed_evals=["string"],
guardrailed=True,
has_tool_calls=True,
@@ -527,7 +534,8 @@ async def test_method_list_with_all_params(self, async_client: AsyncCodex) -> No
order="asc",
passed_evals=["string"],
primary_eval_issue=["hallucination"],
- sort="sort",
+ search_text="search_text",
+ sort="created_at",
tool_call_names=["string"],
was_cache_hit=True,
)
@@ -640,6 +648,7 @@ async def test_method_list_by_group_with_all_params(self, async_client: AsyncCod
created_at_end=parse_datetime("2019-12-27T18:11:19.117Z"),
created_at_start=parse_datetime("2019-12-27T18:11:19.117Z"),
custom_metadata="custom_metadata",
+ expert_review_status="good",
failed_evals=["string"],
guardrailed=True,
has_tool_calls=True,
@@ -650,7 +659,8 @@ async def test_method_list_by_group_with_all_params(self, async_client: AsyncCod
passed_evals=["string"],
primary_eval_issue=["hallucination"],
remediation_ids=["182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"],
- sort="sort",
+ search_text="search_text",
+ sort="created_at",
tool_call_names=["string"],
was_cache_hit=True,
)
@@ -706,6 +716,7 @@ async def test_method_list_groups_with_all_params(self, async_client: AsyncCodex
created_at_end=parse_datetime("2019-12-27T18:11:19.117Z"),
created_at_start=parse_datetime("2019-12-27T18:11:19.117Z"),
custom_metadata="custom_metadata",
+ expert_review_status="good",
failed_evals=["string"],
guardrailed=True,
has_tool_calls=True,
@@ -715,6 +726,7 @@ async def test_method_list_groups_with_all_params(self, async_client: AsyncCodex
order="asc",
passed_evals=["string"],
primary_eval_issue=["hallucination"],
+ search_text="search_text",
sort="created_at",
tool_call_names=["string"],
was_cache_hit=True,
diff --git a/tests/api_resources/test_projects.py b/tests/api_resources/test_projects.py
index 04eef999..153c3aae 100644
--- a/tests/api_resources/test_projects.py
+++ b/tests/api_resources/test_projects.py
@@ -118,6 +118,8 @@ def test_method_create_with_all_params(self, client: Codex) -> None:
"lower_llm_match_distance_threshold": 0,
"max_distance": 0,
"query_use_llm_matching": True,
+ "question_match_llm_prompt": "question_match_llm_prompt",
+ "question_match_llm_prompt_with_answer": "question_match_llm_prompt_with_answer",
"tlm_evals_model": "tlm_evals_model",
"upper_llm_match_distance_threshold": 0,
},
@@ -294,6 +296,8 @@ def test_method_update_with_all_params(self, client: Codex) -> None:
"lower_llm_match_distance_threshold": 0,
"max_distance": 0,
"query_use_llm_matching": True,
+ "question_match_llm_prompt": "question_match_llm_prompt",
+ "question_match_llm_prompt_with_answer": "question_match_llm_prompt_with_answer",
"tlm_evals_model": "tlm_evals_model",
"upper_llm_match_distance_threshold": 0,
},
@@ -629,7 +633,6 @@ def test_method_validate_with_all_params(self, client: Codex) -> None:
"similarity_measure": "similarity_measure",
"use_self_reflection": True,
},
- prompt="prompt",
quality_preset="best",
rewritten_question="rewritten_question",
task="task",
@@ -794,6 +797,8 @@ async def test_method_create_with_all_params(self, async_client: AsyncCodex) ->
"lower_llm_match_distance_threshold": 0,
"max_distance": 0,
"query_use_llm_matching": True,
+ "question_match_llm_prompt": "question_match_llm_prompt",
+ "question_match_llm_prompt_with_answer": "question_match_llm_prompt_with_answer",
"tlm_evals_model": "tlm_evals_model",
"upper_llm_match_distance_threshold": 0,
},
@@ -970,6 +975,8 @@ async def test_method_update_with_all_params(self, async_client: AsyncCodex) ->
"lower_llm_match_distance_threshold": 0,
"max_distance": 0,
"query_use_llm_matching": True,
+ "question_match_llm_prompt": "question_match_llm_prompt",
+ "question_match_llm_prompt_with_answer": "question_match_llm_prompt_with_answer",
"tlm_evals_model": "tlm_evals_model",
"upper_llm_match_distance_threshold": 0,
},
@@ -1305,7 +1312,6 @@ async def test_method_validate_with_all_params(self, async_client: AsyncCodex) -
"similarity_measure": "similarity_measure",
"use_self_reflection": True,
},
- prompt="prompt",
quality_preset="best",
rewritten_question="rewritten_question",
task="task",
diff --git a/tests/api_resources/test_tlm.py b/tests/api_resources/test_tlm.py
deleted file mode 100644
index 6c8c1770..00000000
--- a/tests/api_resources/test_tlm.py
+++ /dev/null
@@ -1,268 +0,0 @@
-# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-
-from __future__ import annotations
-
-import os
-from typing import Any, cast
-
-import pytest
-
-from codex import Codex, AsyncCodex
-from codex.types import TlmScoreResponse, TlmPromptResponse
-from tests.utils import assert_matches_type
-
-base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
-
-
-class TestTlm:
- parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_method_prompt(self, client: Codex) -> None:
- tlm = client.tlm.prompt(
- prompt="prompt",
- )
- assert_matches_type(TlmPromptResponse, tlm, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_method_prompt_with_all_params(self, client: Codex) -> None:
- tlm = client.tlm.prompt(
- prompt="prompt",
- constrain_outputs=["string"],
- options={
- "custom_eval_criteria": [{}],
- "disable_persistence": True,
- "disable_trustworthiness": True,
- "log": ["string"],
- "max_tokens": 0,
- "model": "model",
- "num_candidate_responses": 0,
- "num_consistency_samples": 0,
- "num_self_reflections": 0,
- "reasoning_effort": "reasoning_effort",
- "similarity_measure": "similarity_measure",
- "use_self_reflection": True,
- },
- quality_preset="best",
- task="task",
- )
- assert_matches_type(TlmPromptResponse, tlm, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_raw_response_prompt(self, client: Codex) -> None:
- response = client.tlm.with_raw_response.prompt(
- prompt="prompt",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tlm = response.parse()
- assert_matches_type(TlmPromptResponse, tlm, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_streaming_response_prompt(self, client: Codex) -> None:
- with client.tlm.with_streaming_response.prompt(
- prompt="prompt",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tlm = response.parse()
- assert_matches_type(TlmPromptResponse, tlm, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_method_score(self, client: Codex) -> None:
- tlm = client.tlm.score(
- prompt="prompt",
- response="response",
- )
- assert_matches_type(TlmScoreResponse, tlm, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_method_score_with_all_params(self, client: Codex) -> None:
- tlm = client.tlm.score(
- prompt="prompt",
- response="response",
- constrain_outputs=["string"],
- options={
- "custom_eval_criteria": [{}],
- "disable_persistence": True,
- "disable_trustworthiness": True,
- "log": ["string"],
- "max_tokens": 0,
- "model": "model",
- "num_candidate_responses": 0,
- "num_consistency_samples": 0,
- "num_self_reflections": 0,
- "reasoning_effort": "reasoning_effort",
- "similarity_measure": "similarity_measure",
- "use_self_reflection": True,
- },
- quality_preset="best",
- task="task",
- )
- assert_matches_type(TlmScoreResponse, tlm, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_raw_response_score(self, client: Codex) -> None:
- response = client.tlm.with_raw_response.score(
- prompt="prompt",
- response="response",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tlm = response.parse()
- assert_matches_type(TlmScoreResponse, tlm, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- def test_streaming_response_score(self, client: Codex) -> None:
- with client.tlm.with_streaming_response.score(
- prompt="prompt",
- response="response",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tlm = response.parse()
- assert_matches_type(TlmScoreResponse, tlm, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
-
-class TestAsyncTlm:
- parametrize = pytest.mark.parametrize(
- "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
- )
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_method_prompt(self, async_client: AsyncCodex) -> None:
- tlm = await async_client.tlm.prompt(
- prompt="prompt",
- )
- assert_matches_type(TlmPromptResponse, tlm, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_method_prompt_with_all_params(self, async_client: AsyncCodex) -> None:
- tlm = await async_client.tlm.prompt(
- prompt="prompt",
- constrain_outputs=["string"],
- options={
- "custom_eval_criteria": [{}],
- "disable_persistence": True,
- "disable_trustworthiness": True,
- "log": ["string"],
- "max_tokens": 0,
- "model": "model",
- "num_candidate_responses": 0,
- "num_consistency_samples": 0,
- "num_self_reflections": 0,
- "reasoning_effort": "reasoning_effort",
- "similarity_measure": "similarity_measure",
- "use_self_reflection": True,
- },
- quality_preset="best",
- task="task",
- )
- assert_matches_type(TlmPromptResponse, tlm, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_raw_response_prompt(self, async_client: AsyncCodex) -> None:
- response = await async_client.tlm.with_raw_response.prompt(
- prompt="prompt",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tlm = await response.parse()
- assert_matches_type(TlmPromptResponse, tlm, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_streaming_response_prompt(self, async_client: AsyncCodex) -> None:
- async with async_client.tlm.with_streaming_response.prompt(
- prompt="prompt",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tlm = await response.parse()
- assert_matches_type(TlmPromptResponse, tlm, path=["response"])
-
- assert cast(Any, response.is_closed) is True
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_method_score(self, async_client: AsyncCodex) -> None:
- tlm = await async_client.tlm.score(
- prompt="prompt",
- response="response",
- )
- assert_matches_type(TlmScoreResponse, tlm, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_method_score_with_all_params(self, async_client: AsyncCodex) -> None:
- tlm = await async_client.tlm.score(
- prompt="prompt",
- response="response",
- constrain_outputs=["string"],
- options={
- "custom_eval_criteria": [{}],
- "disable_persistence": True,
- "disable_trustworthiness": True,
- "log": ["string"],
- "max_tokens": 0,
- "model": "model",
- "num_candidate_responses": 0,
- "num_consistency_samples": 0,
- "num_self_reflections": 0,
- "reasoning_effort": "reasoning_effort",
- "similarity_measure": "similarity_measure",
- "use_self_reflection": True,
- },
- quality_preset="best",
- task="task",
- )
- assert_matches_type(TlmScoreResponse, tlm, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_raw_response_score(self, async_client: AsyncCodex) -> None:
- response = await async_client.tlm.with_raw_response.score(
- prompt="prompt",
- response="response",
- )
-
- assert response.is_closed is True
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
- tlm = await response.parse()
- assert_matches_type(TlmScoreResponse, tlm, path=["response"])
-
- @pytest.mark.skip(reason="Prism tests are disabled")
- @parametrize
- async def test_streaming_response_score(self, async_client: AsyncCodex) -> None:
- async with async_client.tlm.with_streaming_response.score(
- prompt="prompt",
- response="response",
- ) as response:
- assert not response.is_closed
- assert response.http_request.headers.get("X-Stainless-Lang") == "python"
-
- tlm = await response.parse()
- assert_matches_type(TlmScoreResponse, tlm, path=["response"])
-
- assert cast(Any, response.is_closed) is True
diff --git a/tests/test_client.py b/tests/test_client.py
index cdc717fd..438d827a 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -6,13 +6,10 @@
import os
import sys
import json
-import time
import asyncio
import inspect
-import subprocess
import tracemalloc
from typing import Any, Union, cast
-from textwrap import dedent
from unittest import mock
from typing_extensions import Literal
@@ -23,14 +20,17 @@
from codex import Codex, AsyncCodex, APIResponseValidationError
from codex._types import Omit
+from codex._utils import asyncify
from codex._models import BaseModel, FinalRequestOptions
from codex._exceptions import APIStatusError, APITimeoutError, APIResponseValidationError
from codex._base_client import (
DEFAULT_TIMEOUT,
HTTPX_DEFAULT_TIMEOUT,
BaseClient,
+ OtherPlatform,
DefaultHttpxClient,
DefaultAsyncHttpxClient,
+ get_platform,
make_request_options,
)
@@ -1671,50 +1671,9 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
assert response.http_request.headers.get("x-stainless-retry-count") == "42"
- def test_get_platform(self) -> None:
- # A previous implementation of asyncify could leave threads unterminated when
- # used with nest_asyncio.
- #
- # Since nest_asyncio.apply() is global and cannot be un-applied, this
- # test is run in a separate process to avoid affecting other tests.
- test_code = dedent("""
- import asyncio
- import nest_asyncio
- import threading
-
- from codex._utils import asyncify
- from codex._base_client import get_platform
-
- async def test_main() -> None:
- result = await asyncify(get_platform)()
- print(result)
- for thread in threading.enumerate():
- print(thread.name)
-
- nest_asyncio.apply()
- asyncio.run(test_main())
- """)
- with subprocess.Popen(
- [sys.executable, "-c", test_code],
- text=True,
- ) as process:
- timeout = 10 # seconds
-
- start_time = time.monotonic()
- while True:
- return_code = process.poll()
- if return_code is not None:
- if return_code != 0:
- raise AssertionError("calling get_platform using asyncify resulted in a non-zero exit code")
-
- # success
- break
-
- if time.monotonic() - start_time > timeout:
- process.kill()
- raise AssertionError("calling get_platform using asyncify resulted in a hung process")
-
- time.sleep(0.1)
+ async def test_get_platform(self) -> None:
+ platform = await asyncify(get_platform)()
+ assert isinstance(platform, (str, OtherPlatform))
async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
# Test that the proxy environment variables are set correctly
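The rewritten test calls `asyncify(get_platform)` in-process instead of spawning a subprocess, since the nest_asyncio regression it guarded against no longer applies. The helper is general; a minimal sketch of using it outside the tests, with only the imports this diff itself adds:

```python
import asyncio

from codex._utils import asyncify
from codex._base_client import get_platform

async def main() -> None:
    # asyncify runs the blocking callable on a worker thread
    platform = await asyncify(get_platform)()
    print(platform)

asyncio.run(main())
```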
diff --git a/tests/test_transform.py b/tests/test_transform.py
index 4067f583..9afdf358 100644
--- a/tests/test_transform.py
+++ b/tests/test_transform.py
@@ -8,7 +8,7 @@
import pytest
-from codex._types import NOT_GIVEN, Base64FileInput
+from codex._types import Base64FileInput, omit, not_given
from codex._utils import (
PropertyInfo,
transform as _transform,
@@ -450,4 +450,11 @@ async def test_transform_skipping(use_async: bool) -> None:
@pytest.mark.asyncio
async def test_strips_notgiven(use_async: bool) -> None:
assert await transform({"foo_bar": "bar"}, Foo1, use_async) == {"fooBar": "bar"}
- assert await transform({"foo_bar": NOT_GIVEN}, Foo1, use_async) == {}
+ assert await transform({"foo_bar": not_given}, Foo1, use_async) == {}
+
+
+@parametrize
+@pytest.mark.asyncio
+async def test_strips_omit(use_async: bool) -> None:
+ assert await transform({"foo_bar": "bar"}, Foo1, use_async) == {"fooBar": "bar"}
+ assert await transform({"foo_bar": omit}, Foo1, use_async) == {}
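The new test mirrors the old NotGiven behavior for `omit`: `transform()` strips sentinel values, so omitted optional fields never reach the request body. A self-contained sketch using the same helpers the test file imports (`Params` here stands in for the test's local TypedDict):

```python
from typing_extensions import Annotated, TypedDict

from codex._types import omit
from codex._utils import PropertyInfo, transform

class Params(TypedDict, total=False):
    foo_bar: Annotated[str, PropertyInfo(alias="fooBar")]

print(transform({"foo_bar": "bar"}, Params))  # {'fooBar': 'bar'}
print(transform({"foo_bar": omit}, Params))   # {} -- omit is stripped
```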