From b5a889c4bdd788df03f310e30d6a3ee304193f53 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 30 Oct 2025 02:11:36 +0000
Subject: [PATCH 1/7] fix(client): close streams without requiring full consumption

---
 src/codex/_streaming.py | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

diff --git a/src/codex/_streaming.py b/src/codex/_streaming.py
index 3af102ce..d9c4a80a 100644
--- a/src/codex/_streaming.py
+++ b/src/codex/_streaming.py
@@ -57,9 +57,8 @@ def __stream__(self) -> Iterator[_T]:
         for sse in iterator:
             yield process_data(data=sse.json(), cast_to=cast_to, response=response)
 
-        # Ensure the entire stream is consumed
-        for _sse in iterator:
-            ...
+        # As we might not fully consume the response stream, we need to close it explicitly
+        response.close()
 
     def __enter__(self) -> Self:
         return self
@@ -121,9 +120,8 @@ async def __stream__(self) -> AsyncIterator[_T]:
         async for sse in iterator:
             yield process_data(data=sse.json(), cast_to=cast_to, response=response)
 
-        # Ensure the entire stream is consumed
-        async for _sse in iterator:
-            ...
+        # As we might not fully consume the response stream, we need to close it explicitly
+        await response.aclose()
 
     async def __aenter__(self) -> Self:
         return self

From ac9771379a115b0ebc28d916382a8c9755fdd59b Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 30 Oct 2025 17:18:09 +0000
Subject: [PATCH 2/7] codegen metadata

---
 .stats.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.stats.yml b/.stats.yml
index 5add63e3..b8e90dd9 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,3 +1,3 @@
 configured_endpoints: 56
-openapi_spec_hash: ef178c3ce0c31f0785212f1138ee8eee
+openapi_spec_hash: 212db383b6467e2148e62041f38c5cfb
 config_hash: 9e0ed146f9f6e6d1884a4c0589d6f1c2

From 8f06048416bf39cdad0290b23fd8d930be869223 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 31 Oct 2025 02:26:42 +0000
Subject: [PATCH 3/7] chore(internal/tests): avoid race condition with implicit client cleanup

---
 tests/test_client.py | 366 ++++++++++++++++++++++++-------------------
 1 file changed, 202 insertions(+), 164 deletions(-)

diff --git a/tests/test_client.py b/tests/test_client.py
index 438d827a..45622f7d 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -59,51 +59,49 @@ def _get_open_connections(client: Codex | AsyncCodex) -> int:
 
 
 class TestCodex:
-    client = Codex(base_url=base_url, auth_token=auth_token, _strict_response_validation=True)
-
     @pytest.mark.respx(base_url=base_url)
-    def test_raw_response(self, respx_mock: MockRouter) -> None:
+    def test_raw_response(self, respx_mock: MockRouter, client: Codex) -> None:
         respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
 
-        response = self.client.post("/foo", cast_to=httpx.Response)
+        response = client.post("/foo", cast_to=httpx.Response)
         assert response.status_code == 200
         assert isinstance(response, httpx.Response)
         assert response.json() == {"foo": "bar"}
 
     @pytest.mark.respx(base_url=base_url)
-    def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None:
+    def test_raw_response_for_binary(self, respx_mock: MockRouter, client: Codex) -> None:
         respx_mock.post("/foo").mock(
             return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}')
         )
 
-        response = self.client.post("/foo", cast_to=httpx.Response)
+        response = client.post("/foo", cast_to=httpx.Response)
         assert response.status_code == 200
         assert isinstance(response, httpx.Response)
         assert response.json() == {"foo": "bar"}
 
-    def test_copy(self) -> None:
-        copied = self.client.copy()
-        assert id(copied) != id(self.client)
+    def test_copy(self, client: Codex) -> None:
+        copied = client.copy()
+        assert id(copied) != id(client)
 
-        copied = self.client.copy(auth_token="another My Auth Token")
+        copied = client.copy(auth_token="another My Auth Token")
         assert copied.auth_token == "another My Auth Token"
-        assert self.client.auth_token == "My Auth Token"
+        assert client.auth_token == "My Auth Token"
 
-    def test_copy_default_options(self) -> None:
+    def test_copy_default_options(self, client: Codex) -> None:
         # options that have a default are overridden correctly
-        copied = self.client.copy(max_retries=7)
+        copied = client.copy(max_retries=7)
         assert copied.max_retries == 7
-        assert self.client.max_retries == 2
+        assert client.max_retries == 2
 
         copied2 = copied.copy(max_retries=6)
         assert copied2.max_retries == 6
         assert copied.max_retries == 7
 
         # timeout
-        assert isinstance(self.client.timeout, httpx.Timeout)
-        copied = self.client.copy(timeout=None)
+        assert isinstance(client.timeout, httpx.Timeout)
+        copied = client.copy(timeout=None)
         assert copied.timeout is None
-        assert isinstance(self.client.timeout, httpx.Timeout)
+        assert isinstance(client.timeout, httpx.Timeout)
 
     def test_copy_default_headers(self) -> None:
         client = Codex(
@@ -138,6 +136,7 @@ def test_copy_default_headers(self) -> None:
             match="`default_headers` and `set_default_headers` arguments are mutually exclusive",
         ):
             client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"})
+        client.close()
 
     def test_copy_default_query(self) -> None:
         client = Codex(
@@ -175,13 +174,15 @@ def test_copy_default_query(self) -> None:
         ):
             client.copy(set_default_query={}, default_query={"foo": "Bar"})
 
-    def test_copy_signature(self) -> None:
+        client.close()
+
+    def test_copy_signature(self, client: Codex) -> None:
         # ensure the same parameters that can be passed to the client are defined in the `.copy()` method
         init_signature = inspect.signature(
             # mypy doesn't like that we access the `__init__` property.
-            self.client.__init__,  # type: ignore[misc]
+            client.__init__,  # type: ignore[misc]
         )
-        copy_signature = inspect.signature(self.client.copy)
+        copy_signature = inspect.signature(client.copy)
         exclude_params = {"transport", "proxies", "_strict_response_validation"}
 
         for name in init_signature.parameters.keys():
@@ -192,12 +193,12 @@ def test_copy_signature(self) -> None:
             assert copy_param is not None, f"copy() signature is missing the {name} param"
 
     @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12")
-    def test_copy_build_request(self) -> None:
+    def test_copy_build_request(self, client: Codex) -> None:
         options = FinalRequestOptions(method="get", url="/foo")
 
         def build_request(options: FinalRequestOptions) -> None:
-            client = self.client.copy()
-            client._build_request(options)
+            client_copy = client.copy()
+            client_copy._build_request(options)
 
         # ensure that the machinery is warmed up before tracing starts.
build_request(options) @@ -254,14 +255,12 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic print(frame) raise AssertionError() - def test_request_timeout(self) -> None: - request = self.client._build_request(FinalRequestOptions(method="get", url="/foo")) + def test_request_timeout(self, client: Codex) -> None: + request = client._build_request(FinalRequestOptions(method="get", url="/foo")) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT - request = self.client._build_request( - FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0)) - ) + request = client._build_request(FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0))) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(100.0) @@ -274,6 +273,8 @@ def test_client_timeout_option(self) -> None: timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(0) + client.close() + def test_http_client_timeout_option(self) -> None: # custom timeout given to the httpx client should be used with httpx.Client(timeout=None) as http_client: @@ -285,6 +286,8 @@ def test_http_client_timeout_option(self) -> None: timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(None) + client.close() + # no timeout given to the httpx client should not use the httpx default with httpx.Client() as http_client: client = Codex( @@ -295,6 +298,8 @@ def test_http_client_timeout_option(self) -> None: timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT + client.close() + # explicitly passing the default timeout currently results in it being ignored with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: client = Codex( @@ -305,6 +310,8 @@ def test_http_client_timeout_option(self) -> None: timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT # our default + client.close() + async def test_invalid_http_client(self) -> None: with pytest.raises(TypeError, match="Invalid `http_client` arg"): async with httpx.AsyncClient() as http_client: @@ -316,14 +323,14 @@ async def test_invalid_http_client(self) -> None: ) def test_default_headers_option(self) -> None: - client = Codex( + test_client = Codex( base_url=base_url, auth_token=auth_token, _strict_response_validation=True, default_headers={"X-Foo": "bar"} ) - request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + request = test_client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "bar" assert request.headers.get("x-stainless-lang") == "python" - client2 = Codex( + test_client2 = Codex( base_url=base_url, auth_token=auth_token, _strict_response_validation=True, @@ -332,10 +339,13 @@ def test_default_headers_option(self) -> None: "X-Stainless-Lang": "my-overriding-header", }, ) - request = client2._build_request(FinalRequestOptions(method="get", url="/foo")) + request = test_client2._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "stainless" assert request.headers.get("x-stainless-lang") == "my-overriding-header" + test_client.close() + test_client2.close() + def test_default_query_option(self) -> None: client = Codex( base_url=base_url, @@ -357,8 +367,10 @@ def test_default_query_option(self) -> None: url = 
httpx.URL(request.url) assert dict(url.params) == {"foo": "baz", "query_param": "overridden"} - def test_request_extra_json(self) -> None: - request = self.client._build_request( + client.close() + + def test_request_extra_json(self, client: Codex) -> None: + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -369,7 +381,7 @@ def test_request_extra_json(self) -> None: data = json.loads(request.content.decode("utf-8")) assert data == {"foo": "bar", "baz": False} - request = self.client._build_request( + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -380,7 +392,7 @@ def test_request_extra_json(self) -> None: assert data == {"baz": False} # `extra_json` takes priority over `json_data` when keys clash - request = self.client._build_request( + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -391,8 +403,8 @@ def test_request_extra_json(self) -> None: data = json.loads(request.content.decode("utf-8")) assert data == {"foo": "bar", "baz": None} - def test_request_extra_headers(self) -> None: - request = self.client._build_request( + def test_request_extra_headers(self, client: Codex) -> None: + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -402,7 +414,7 @@ def test_request_extra_headers(self) -> None: assert request.headers.get("X-Foo") == "Foo" # `extra_headers` takes priority over `default_headers` when keys clash - request = self.client.with_options(default_headers={"X-Bar": "true"})._build_request( + request = client.with_options(default_headers={"X-Bar": "true"})._build_request( FinalRequestOptions( method="post", url="/foo", @@ -413,8 +425,8 @@ def test_request_extra_headers(self) -> None: ) assert request.headers.get("X-Bar") == "false" - def test_request_extra_query(self) -> None: - request = self.client._build_request( + def test_request_extra_query(self, client: Codex) -> None: + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -427,7 +439,7 @@ def test_request_extra_query(self) -> None: assert params == {"my_query_param": "Foo"} # if both `query` and `extra_query` are given, they are merged - request = self.client._build_request( + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -441,7 +453,7 @@ def test_request_extra_query(self) -> None: assert params == {"bar": "1", "foo": "2"} # `extra_query` takes priority over `query` when keys clash - request = self.client._build_request( + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -484,7 +496,7 @@ def test_multipart_repeating_array(self, client: Codex) -> None: ] @pytest.mark.respx(base_url=base_url) - def test_basic_union_response(self, respx_mock: MockRouter) -> None: + def test_basic_union_response(self, respx_mock: MockRouter, client: Codex) -> None: class Model1(BaseModel): name: str @@ -493,12 +505,12 @@ class Model2(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + response = client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model2) assert response.foo == "bar" @pytest.mark.respx(base_url=base_url) - def test_union_response_different_types(self, respx_mock: MockRouter) -> None: + def test_union_response_different_types(self, respx_mock: MockRouter, client: Codex) -> None: """Union of objects with the same field name using a 
different type""" class Model1(BaseModel): @@ -509,18 +521,18 @@ class Model2(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + response = client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model2) assert response.foo == "bar" respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1})) - response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + response = client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model1) assert response.foo == 1 @pytest.mark.respx(base_url=base_url) - def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter) -> None: + def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter, client: Codex) -> None: """ Response that sets Content-Type to something other than application/json but returns json data """ @@ -536,7 +548,7 @@ class Model(BaseModel): ) ) - response = self.client.get("/foo", cast_to=Model) + response = client.get("/foo", cast_to=Model) assert isinstance(response, Model) assert response.foo == 2 @@ -550,6 +562,8 @@ def test_base_url_setter(self) -> None: assert client.base_url == "https://example.com/from_setter/" + client.close() + def test_base_url_env(self) -> None: with update_env(CODEX_BASE_URL="http://localhost:5000/from/env"): client = Codex(auth_token=auth_token, _strict_response_validation=True) @@ -565,6 +579,8 @@ def test_base_url_env(self) -> None: ) assert str(client.base_url).startswith("https://api-codex.cleanlab.ai") + client.close() + @pytest.mark.parametrize( "client", [ @@ -589,6 +605,7 @@ def test_base_url_trailing_slash(self, client: Codex) -> None: ), ) assert request.url == "http://localhost:5000/custom/path/foo" + client.close() @pytest.mark.parametrize( "client", @@ -614,6 +631,7 @@ def test_base_url_no_trailing_slash(self, client: Codex) -> None: ), ) assert request.url == "http://localhost:5000/custom/path/foo" + client.close() @pytest.mark.parametrize( "client", @@ -639,35 +657,36 @@ def test_absolute_request_url(self, client: Codex) -> None: ), ) assert request.url == "https://myapi.com/foo" + client.close() def test_copied_client_does_not_close_http(self) -> None: - client = Codex(base_url=base_url, auth_token=auth_token, _strict_response_validation=True) - assert not client.is_closed() + test_client = Codex(base_url=base_url, auth_token=auth_token, _strict_response_validation=True) + assert not test_client.is_closed() - copied = client.copy() - assert copied is not client + copied = test_client.copy() + assert copied is not test_client del copied - assert not client.is_closed() + assert not test_client.is_closed() def test_client_context_manager(self) -> None: - client = Codex(base_url=base_url, auth_token=auth_token, _strict_response_validation=True) - with client as c2: - assert c2 is client + test_client = Codex(base_url=base_url, auth_token=auth_token, _strict_response_validation=True) + with test_client as c2: + assert c2 is test_client assert not c2.is_closed() - assert not client.is_closed() - assert client.is_closed() + assert not test_client.is_closed() + assert test_client.is_closed() @pytest.mark.respx(base_url=base_url) - def test_client_response_validation_error(self, respx_mock: MockRouter) -> None: + def test_client_response_validation_error(self, respx_mock: MockRouter, client: Codex) -> None: class 
Model(BaseModel): foo: str respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}})) with pytest.raises(APIResponseValidationError) as exc: - self.client.get("/foo", cast_to=Model) + client.get("/foo", cast_to=Model) assert isinstance(exc.value.__cause__, ValidationError) @@ -689,11 +708,14 @@ class Model(BaseModel): with pytest.raises(APIResponseValidationError): strict_client.get("/foo", cast_to=Model) - client = Codex(base_url=base_url, auth_token=auth_token, _strict_response_validation=False) + non_strict_client = Codex(base_url=base_url, auth_token=auth_token, _strict_response_validation=False) - response = client.get("/foo", cast_to=Model) + response = non_strict_client.get("/foo", cast_to=Model) assert isinstance(response, str) # type: ignore[unreachable] + strict_client.close() + non_strict_client.close() + @pytest.mark.parametrize( "remaining_retries,retry_after,timeout", [ @@ -716,9 +738,9 @@ class Model(BaseModel): ], ) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) - def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: - client = Codex(base_url=base_url, auth_token=auth_token, _strict_response_validation=True) - + def test_parse_retry_after_header( + self, remaining_retries: int, retry_after: str, timeout: float, client: Codex + ) -> None: headers = httpx.Headers({"retry-after": retry_after}) options = FinalRequestOptions(method="get", url="/foo", max_retries=3) calculated = client._calculate_retry_timeout(remaining_retries, options, headers) @@ -734,7 +756,7 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien config={}, name="name", organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e" ).__enter__() - assert _get_open_connections(self.client) == 0 + assert _get_open_connections(client) == 0 @mock.patch("codex._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @pytest.mark.respx(base_url=base_url) @@ -745,7 +767,7 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client client.projects.with_streaming_response.create( config={}, name="name", organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e" ).__enter__() - assert _get_open_connections(self.client) == 0 + assert _get_open_connections(client) == 0 @pytest.mark.parametrize("failures_before_success", [0, 2, 4]) @mock.patch("codex._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout) @@ -857,83 +879,77 @@ def test_default_client_creation(self) -> None: ) @pytest.mark.respx(base_url=base_url) - def test_follow_redirects(self, respx_mock: MockRouter) -> None: + def test_follow_redirects(self, respx_mock: MockRouter, client: Codex) -> None: # Test that the default follow_redirects=True allows following redirects respx_mock.post("/redirect").mock( return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) ) respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"})) - response = self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) + response = client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response) assert response.status_code == 200 assert response.json() == {"status": "ok"} @pytest.mark.respx(base_url=base_url) - def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None: + def test_follow_redirects_disabled(self, respx_mock: MockRouter, client: Codex) -> None: # Test that follow_redirects=False prevents following 
redirects respx_mock.post("/redirect").mock( return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"}) ) with pytest.raises(APIStatusError) as exc_info: - self.client.post( - "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response - ) + client.post("/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response) assert exc_info.value.response.status_code == 302 assert exc_info.value.response.headers["Location"] == f"{base_url}/redirected" class TestAsyncCodex: - client = AsyncCodex(base_url=base_url, auth_token=auth_token, _strict_response_validation=True) - @pytest.mark.respx(base_url=base_url) - @pytest.mark.asyncio - async def test_raw_response(self, respx_mock: MockRouter) -> None: + async def test_raw_response(self, respx_mock: MockRouter, async_client: AsyncCodex) -> None: respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - response = await self.client.post("/foo", cast_to=httpx.Response) + response = await async_client.post("/foo", cast_to=httpx.Response) assert response.status_code == 200 assert isinstance(response, httpx.Response) assert response.json() == {"foo": "bar"} @pytest.mark.respx(base_url=base_url) - @pytest.mark.asyncio - async def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None: + async def test_raw_response_for_binary(self, respx_mock: MockRouter, async_client: AsyncCodex) -> None: respx_mock.post("/foo").mock( return_value=httpx.Response(200, headers={"Content-Type": "application/binary"}, content='{"foo": "bar"}') ) - response = await self.client.post("/foo", cast_to=httpx.Response) + response = await async_client.post("/foo", cast_to=httpx.Response) assert response.status_code == 200 assert isinstance(response, httpx.Response) assert response.json() == {"foo": "bar"} - def test_copy(self) -> None: - copied = self.client.copy() - assert id(copied) != id(self.client) + def test_copy(self, async_client: AsyncCodex) -> None: + copied = async_client.copy() + assert id(copied) != id(async_client) - copied = self.client.copy(auth_token="another My Auth Token") + copied = async_client.copy(auth_token="another My Auth Token") assert copied.auth_token == "another My Auth Token" - assert self.client.auth_token == "My Auth Token" + assert async_client.auth_token == "My Auth Token" - def test_copy_default_options(self) -> None: + def test_copy_default_options(self, async_client: AsyncCodex) -> None: # options that have a default are overridden correctly - copied = self.client.copy(max_retries=7) + copied = async_client.copy(max_retries=7) assert copied.max_retries == 7 - assert self.client.max_retries == 2 + assert async_client.max_retries == 2 copied2 = copied.copy(max_retries=6) assert copied2.max_retries == 6 assert copied.max_retries == 7 # timeout - assert isinstance(self.client.timeout, httpx.Timeout) - copied = self.client.copy(timeout=None) + assert isinstance(async_client.timeout, httpx.Timeout) + copied = async_client.copy(timeout=None) assert copied.timeout is None - assert isinstance(self.client.timeout, httpx.Timeout) + assert isinstance(async_client.timeout, httpx.Timeout) - def test_copy_default_headers(self) -> None: + async def test_copy_default_headers(self) -> None: client = AsyncCodex( base_url=base_url, auth_token=auth_token, _strict_response_validation=True, default_headers={"X-Foo": "bar"} ) @@ -966,8 +982,9 @@ def test_copy_default_headers(self) -> None: match="`default_headers` and `set_default_headers` 
arguments are mutually exclusive", ): client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"}) + await client.close() - def test_copy_default_query(self) -> None: + async def test_copy_default_query(self) -> None: client = AsyncCodex( base_url=base_url, auth_token=auth_token, _strict_response_validation=True, default_query={"foo": "bar"} ) @@ -1003,13 +1020,15 @@ def test_copy_default_query(self) -> None: ): client.copy(set_default_query={}, default_query={"foo": "Bar"}) - def test_copy_signature(self) -> None: + await client.close() + + def test_copy_signature(self, async_client: AsyncCodex) -> None: # ensure the same parameters that can be passed to the client are defined in the `.copy()` method init_signature = inspect.signature( # mypy doesn't like that we access the `__init__` property. - self.client.__init__, # type: ignore[misc] + async_client.__init__, # type: ignore[misc] ) - copy_signature = inspect.signature(self.client.copy) + copy_signature = inspect.signature(async_client.copy) exclude_params = {"transport", "proxies", "_strict_response_validation"} for name in init_signature.parameters.keys(): @@ -1020,12 +1039,12 @@ def test_copy_signature(self) -> None: assert copy_param is not None, f"copy() signature is missing the {name} param" @pytest.mark.skipif(sys.version_info >= (3, 10), reason="fails because of a memory leak that started from 3.12") - def test_copy_build_request(self) -> None: + def test_copy_build_request(self, async_client: AsyncCodex) -> None: options = FinalRequestOptions(method="get", url="/foo") def build_request(options: FinalRequestOptions) -> None: - client = self.client.copy() - client._build_request(options) + client_copy = async_client.copy() + client_copy._build_request(options) # ensure that the machinery is warmed up before tracing starts. 
build_request(options) @@ -1082,12 +1101,12 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic print(frame) raise AssertionError() - async def test_request_timeout(self) -> None: - request = self.client._build_request(FinalRequestOptions(method="get", url="/foo")) + async def test_request_timeout(self, async_client: AsyncCodex) -> None: + request = async_client._build_request(FinalRequestOptions(method="get", url="/foo")) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT - request = self.client._build_request( + request = async_client._build_request( FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0)) ) timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore @@ -1102,6 +1121,8 @@ async def test_client_timeout_option(self) -> None: timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(0) + await client.close() + async def test_http_client_timeout_option(self) -> None: # custom timeout given to the httpx client should be used async with httpx.AsyncClient(timeout=None) as http_client: @@ -1113,6 +1134,8 @@ async def test_http_client_timeout_option(self) -> None: timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == httpx.Timeout(None) + await client.close() + # no timeout given to the httpx client should not use the httpx default async with httpx.AsyncClient() as http_client: client = AsyncCodex( @@ -1123,6 +1146,8 @@ async def test_http_client_timeout_option(self) -> None: timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT + await client.close() + # explicitly passing the default timeout currently results in it being ignored async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client: client = AsyncCodex( @@ -1133,6 +1158,8 @@ async def test_http_client_timeout_option(self) -> None: timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore assert timeout == DEFAULT_TIMEOUT # our default + await client.close() + def test_invalid_http_client(self) -> None: with pytest.raises(TypeError, match="Invalid `http_client` arg"): with httpx.Client() as http_client: @@ -1143,15 +1170,15 @@ def test_invalid_http_client(self) -> None: http_client=cast(Any, http_client), ) - def test_default_headers_option(self) -> None: - client = AsyncCodex( + async def test_default_headers_option(self) -> None: + test_client = AsyncCodex( base_url=base_url, auth_token=auth_token, _strict_response_validation=True, default_headers={"X-Foo": "bar"} ) - request = client._build_request(FinalRequestOptions(method="get", url="/foo")) + request = test_client._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "bar" assert request.headers.get("x-stainless-lang") == "python" - client2 = AsyncCodex( + test_client2 = AsyncCodex( base_url=base_url, auth_token=auth_token, _strict_response_validation=True, @@ -1160,11 +1187,14 @@ def test_default_headers_option(self) -> None: "X-Stainless-Lang": "my-overriding-header", }, ) - request = client2._build_request(FinalRequestOptions(method="get", url="/foo")) + request = test_client2._build_request(FinalRequestOptions(method="get", url="/foo")) assert request.headers.get("x-foo") == "stainless" assert request.headers.get("x-stainless-lang") == "my-overriding-header" - def test_default_query_option(self) -> None: + await test_client.close() + await 
test_client2.close() + + async def test_default_query_option(self) -> None: client = AsyncCodex( base_url=base_url, auth_token=auth_token, @@ -1185,8 +1215,10 @@ def test_default_query_option(self) -> None: url = httpx.URL(request.url) assert dict(url.params) == {"foo": "baz", "query_param": "overridden"} - def test_request_extra_json(self) -> None: - request = self.client._build_request( + await client.close() + + def test_request_extra_json(self, client: Codex) -> None: + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1197,7 +1229,7 @@ def test_request_extra_json(self) -> None: data = json.loads(request.content.decode("utf-8")) assert data == {"foo": "bar", "baz": False} - request = self.client._build_request( + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1208,7 +1240,7 @@ def test_request_extra_json(self) -> None: assert data == {"baz": False} # `extra_json` takes priority over `json_data` when keys clash - request = self.client._build_request( + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1219,8 +1251,8 @@ def test_request_extra_json(self) -> None: data = json.loads(request.content.decode("utf-8")) assert data == {"foo": "bar", "baz": None} - def test_request_extra_headers(self) -> None: - request = self.client._build_request( + def test_request_extra_headers(self, client: Codex) -> None: + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1230,7 +1262,7 @@ def test_request_extra_headers(self) -> None: assert request.headers.get("X-Foo") == "Foo" # `extra_headers` takes priority over `default_headers` when keys clash - request = self.client.with_options(default_headers={"X-Bar": "true"})._build_request( + request = client.with_options(default_headers={"X-Bar": "true"})._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1241,8 +1273,8 @@ def test_request_extra_headers(self) -> None: ) assert request.headers.get("X-Bar") == "false" - def test_request_extra_query(self) -> None: - request = self.client._build_request( + def test_request_extra_query(self, client: Codex) -> None: + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1255,7 +1287,7 @@ def test_request_extra_query(self) -> None: assert params == {"my_query_param": "Foo"} # if both `query` and `extra_query` are given, they are merged - request = self.client._build_request( + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1269,7 +1301,7 @@ def test_request_extra_query(self) -> None: assert params == {"bar": "1", "foo": "2"} # `extra_query` takes priority over `query` when keys clash - request = self.client._build_request( + request = client._build_request( FinalRequestOptions( method="post", url="/foo", @@ -1312,7 +1344,7 @@ def test_multipart_repeating_array(self, async_client: AsyncCodex) -> None: ] @pytest.mark.respx(base_url=base_url) - async def test_basic_union_response(self, respx_mock: MockRouter) -> None: + async def test_basic_union_response(self, respx_mock: MockRouter, async_client: AsyncCodex) -> None: class Model1(BaseModel): name: str @@ -1321,12 +1353,12 @@ class Model2(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + response = await async_client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model2) assert 
response.foo == "bar" @pytest.mark.respx(base_url=base_url) - async def test_union_response_different_types(self, respx_mock: MockRouter) -> None: + async def test_union_response_different_types(self, respx_mock: MockRouter, async_client: AsyncCodex) -> None: """Union of objects with the same field name using a different type""" class Model1(BaseModel): @@ -1337,18 +1369,20 @@ class Model2(BaseModel): respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"})) - response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + response = await async_client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model2) assert response.foo == "bar" respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1})) - response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) + response = await async_client.get("/foo", cast_to=cast(Any, Union[Model1, Model2])) assert isinstance(response, Model1) assert response.foo == 1 @pytest.mark.respx(base_url=base_url) - async def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter) -> None: + async def test_non_application_json_content_type_for_json_data( + self, respx_mock: MockRouter, async_client: AsyncCodex + ) -> None: """ Response that sets Content-Type to something other than application/json but returns json data """ @@ -1364,11 +1398,11 @@ class Model(BaseModel): ) ) - response = await self.client.get("/foo", cast_to=Model) + response = await async_client.get("/foo", cast_to=Model) assert isinstance(response, Model) assert response.foo == 2 - def test_base_url_setter(self) -> None: + async def test_base_url_setter(self) -> None: client = AsyncCodex( base_url="https://example.com/from_init", auth_token=auth_token, _strict_response_validation=True ) @@ -1378,7 +1412,9 @@ def test_base_url_setter(self) -> None: assert client.base_url == "https://example.com/from_setter/" - def test_base_url_env(self) -> None: + await client.close() + + async def test_base_url_env(self) -> None: with update_env(CODEX_BASE_URL="http://localhost:5000/from/env"): client = AsyncCodex(auth_token=auth_token, _strict_response_validation=True) assert client.base_url == "http://localhost:5000/from/env/" @@ -1393,6 +1429,8 @@ def test_base_url_env(self) -> None: ) assert str(client.base_url).startswith("https://api-codex.cleanlab.ai") + await client.close() + @pytest.mark.parametrize( "client", [ @@ -1408,7 +1446,7 @@ def test_base_url_env(self) -> None: ], ids=["standard", "custom http client"], ) - def test_base_url_trailing_slash(self, client: AsyncCodex) -> None: + async def test_base_url_trailing_slash(self, client: AsyncCodex) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -1417,6 +1455,7 @@ def test_base_url_trailing_slash(self, client: AsyncCodex) -> None: ), ) assert request.url == "http://localhost:5000/custom/path/foo" + await client.close() @pytest.mark.parametrize( "client", @@ -1433,7 +1472,7 @@ def test_base_url_trailing_slash(self, client: AsyncCodex) -> None: ], ids=["standard", "custom http client"], ) - def test_base_url_no_trailing_slash(self, client: AsyncCodex) -> None: + async def test_base_url_no_trailing_slash(self, client: AsyncCodex) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -1442,6 +1481,7 @@ def test_base_url_no_trailing_slash(self, client: AsyncCodex) -> None: ), ) assert request.url == "http://localhost:5000/custom/path/foo" + await 
client.close() @pytest.mark.parametrize( "client", @@ -1458,7 +1498,7 @@ def test_base_url_no_trailing_slash(self, client: AsyncCodex) -> None: ], ids=["standard", "custom http client"], ) - def test_absolute_request_url(self, client: AsyncCodex) -> None: + async def test_absolute_request_url(self, client: AsyncCodex) -> None: request = client._build_request( FinalRequestOptions( method="post", @@ -1467,37 +1507,37 @@ def test_absolute_request_url(self, client: AsyncCodex) -> None: ), ) assert request.url == "https://myapi.com/foo" + await client.close() async def test_copied_client_does_not_close_http(self) -> None: - client = AsyncCodex(base_url=base_url, auth_token=auth_token, _strict_response_validation=True) - assert not client.is_closed() + test_client = AsyncCodex(base_url=base_url, auth_token=auth_token, _strict_response_validation=True) + assert not test_client.is_closed() - copied = client.copy() - assert copied is not client + copied = test_client.copy() + assert copied is not test_client del copied await asyncio.sleep(0.2) - assert not client.is_closed() + assert not test_client.is_closed() async def test_client_context_manager(self) -> None: - client = AsyncCodex(base_url=base_url, auth_token=auth_token, _strict_response_validation=True) - async with client as c2: - assert c2 is client + test_client = AsyncCodex(base_url=base_url, auth_token=auth_token, _strict_response_validation=True) + async with test_client as c2: + assert c2 is test_client assert not c2.is_closed() - assert not client.is_closed() - assert client.is_closed() + assert not test_client.is_closed() + assert test_client.is_closed() @pytest.mark.respx(base_url=base_url) - @pytest.mark.asyncio - async def test_client_response_validation_error(self, respx_mock: MockRouter) -> None: + async def test_client_response_validation_error(self, respx_mock: MockRouter, async_client: AsyncCodex) -> None: class Model(BaseModel): foo: str respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}})) with pytest.raises(APIResponseValidationError) as exc: - await self.client.get("/foo", cast_to=Model) + await async_client.get("/foo", cast_to=Model) assert isinstance(exc.value.__cause__, ValidationError) @@ -1508,7 +1548,6 @@ async def test_client_max_retries_validation(self) -> None: ) @pytest.mark.respx(base_url=base_url) - @pytest.mark.asyncio async def test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None: class Model(BaseModel): name: str @@ -1520,11 +1559,14 @@ class Model(BaseModel): with pytest.raises(APIResponseValidationError): await strict_client.get("/foo", cast_to=Model) - client = AsyncCodex(base_url=base_url, auth_token=auth_token, _strict_response_validation=False) + non_strict_client = AsyncCodex(base_url=base_url, auth_token=auth_token, _strict_response_validation=False) - response = await client.get("/foo", cast_to=Model) + response = await non_strict_client.get("/foo", cast_to=Model) assert isinstance(response, str) # type: ignore[unreachable] + await strict_client.close() + await non_strict_client.close() + @pytest.mark.parametrize( "remaining_retries,retry_after,timeout", [ @@ -1547,13 +1589,12 @@ class Model(BaseModel): ], ) @mock.patch("time.time", mock.MagicMock(return_value=1696004797)) - @pytest.mark.asyncio - async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None: - client = AsyncCodex(base_url=base_url, auth_token=auth_token, _strict_response_validation=True) - + async def 
test_parse_retry_after_header(
+        self, remaining_retries: int, retry_after: str, timeout: float, async_client: AsyncCodex
+    ) -> None:
         headers = httpx.Headers({"retry-after": retry_after})
         options = FinalRequestOptions(method="get", url="/foo", max_retries=3)
-        calculated = client._calculate_retry_timeout(remaining_retries, options, headers)
+        calculated = async_client._calculate_retry_timeout(remaining_retries, options, headers)
         assert calculated == pytest.approx(timeout, 0.5 * 0.875)  # pyright: ignore[reportUnknownMemberType]
 
     @mock.patch("codex._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
@@ -1566,7 +1607,7 @@ async def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter,
             config={}, name="name", organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"
         ).__aenter__()
 
-        assert _get_open_connections(self.client) == 0
+        assert _get_open_connections(async_client) == 0
 
     @mock.patch("codex._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
@@ -1577,12 +1618,11 @@ async def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter,
         await async_client.projects.with_streaming_response.create(
             config={}, name="name", organization_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e"
         ).__aenter__()
-        assert _get_open_connections(self.client) == 0
+        assert _get_open_connections(async_client) == 0
 
     @pytest.mark.parametrize("failures_before_success", [0, 2, 4])
     @mock.patch("codex._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
-    @pytest.mark.asyncio
     @pytest.mark.parametrize("failure_mode", ["status", "exception"])
     async def test_retries_taken(
         self,
@@ -1616,7 +1656,6 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
     @pytest.mark.parametrize("failures_before_success", [0, 2, 4])
     @mock.patch("codex._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
-    @pytest.mark.asyncio
     async def test_omit_retry_count_header(
         self, async_client: AsyncCodex, failures_before_success: int, respx_mock: MockRouter
     ) -> None:
@@ -1645,7 +1684,6 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
     @pytest.mark.parametrize("failures_before_success", [0, 2, 4])
     @mock.patch("codex._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
     @pytest.mark.respx(base_url=base_url)
-    @pytest.mark.asyncio
     async def test_overwrite_retry_count_header(
         self, async_client: AsyncCodex, failures_before_success: int, respx_mock: MockRouter
     ) -> None:
@@ -1698,26 +1736,26 @@ async def test_default_client_creation(self) -> None:
         )
 
     @pytest.mark.respx(base_url=base_url)
-    async def test_follow_redirects(self, respx_mock: MockRouter) -> None:
+    async def test_follow_redirects(self, respx_mock: MockRouter, async_client: AsyncCodex) -> None:
         # Test that the default follow_redirects=True allows following redirects
         respx_mock.post("/redirect").mock(
             return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
         )
         respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"}))
 
-        response = await self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response)
+        response = await async_client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response)
         assert response.status_code == 200
         assert response.json() == {"status": "ok"}
 
     @pytest.mark.respx(base_url=base_url)
-    async def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None:
+    async def test_follow_redirects_disabled(self, respx_mock: MockRouter, async_client: AsyncCodex) -> None:
         # Test that follow_redirects=False prevents following redirects
        respx_mock.post("/redirect").mock(
             return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
         )
 
         with pytest.raises(APIStatusError) as exc_info:
-            await self.client.post(
+            await async_client.post(
                 "/redirect", body={"key": "value"}, options={"follow_redirects": False}, cast_to=httpx.Response
             )
 

From 3fe0592cbb7f1f622af46309ff470d38b43cc8a7 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Fri, 31 Oct 2025 18:18:00 +0000
Subject: [PATCH 4/7] codegen metadata

---
 .stats.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.stats.yml b/.stats.yml
index b8e90dd9..84b8a0b7 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,3 +1,3 @@
 configured_endpoints: 56
-openapi_spec_hash: 212db383b6467e2148e62041f38c5cfb
+openapi_spec_hash: 406f4f54d2c48da90ff1a668d2372a7a
 config_hash: 9e0ed146f9f6e6d1884a4c0589d6f1c2

From 9d4a4318fac51055f63af10ba451b26bc6c1f179 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Tue, 4 Nov 2025 03:36:05 +0000
Subject: [PATCH 5/7] chore(internal): grammar fix (it's -> its)

---
 src/codex/_utils/_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/codex/_utils/_utils.py b/src/codex/_utils/_utils.py
index 50d59269..eec7f4a1 100644
--- a/src/codex/_utils/_utils.py
+++ b/src/codex/_utils/_utils.py
@@ -133,7 +133,7 @@ def is_given(obj: _T | NotGiven | Omit) -> TypeGuard[_T]:
 # Type safe methods for narrowing types with TypeVars.
 # The default narrowing for isinstance(obj, dict) is dict[unknown, unknown],
 # however this cause Pyright to rightfully report errors. As we know we don't
-# care about the contained types we can safely use `object` in it's place.
+# care about the contained types we can safely use `object` in its place.
 #
 # There are two separate functions defined, `is_*` and `is_*_t` for different use cases.
# `is_*` is for when you're dealing with an unknown input From e8c853706394437e0738d53594a7d635df224117 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 5 Nov 2025 19:17:56 +0000 Subject: [PATCH 6/7] feat(api): api update --- .stats.yml | 2 +- src/codex/resources/projects/evals.py | 48 ++-- src/codex/types/project_create_params.py | 150 +++++++++--- src/codex/types/project_detect_params.py | 150 +++++++++--- src/codex/types/project_detect_response.py | 48 +++- src/codex/types/project_list_response.py | 150 +++++++++--- src/codex/types/project_retrieve_response.py | 150 +++++++++--- src/codex/types/project_return_schema.py | 150 +++++++++--- src/codex/types/project_update_params.py | 150 +++++++++--- src/codex/types/project_validate_response.py | 74 +++++- .../types/projects/eval_create_params.py | 26 ++- .../types/projects/eval_list_response.py | 26 ++- .../types/projects/eval_update_params.py | 56 ++++- .../query_log_list_by_group_response.py | 47 +++- .../query_log_list_groups_response.py | 47 +++- .../types/projects/query_log_list_response.py | 47 +++- .../projects/query_log_retrieve_response.py | 47 +++- ...remediation_list_resolved_logs_response.py | 47 +++- tests/api_resources/projects/test_evals.py | 36 ++- tests/api_resources/test_projects.py | 216 +++++++++++++++--- 20 files changed, 1387 insertions(+), 280 deletions(-) diff --git a/.stats.yml b/.stats.yml index 84b8a0b7..fdb7be86 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,3 +1,3 @@ configured_endpoints: 56 -openapi_spec_hash: 406f4f54d2c48da90ff1a668d2372a7a +openapi_spec_hash: 80b1836ebec22fc0dc25d5d8efe62a50 config_hash: 9e0ed146f9f6e6d1884a4c0589d6f1c2 diff --git a/src/codex/resources/projects/evals.py b/src/codex/resources/projects/evals.py index f732981f..dd535575 100644 --- a/src/codex/resources/projects/evals.py +++ b/src/codex/resources/projects/evals.py @@ -54,7 +54,7 @@ def create( name: str, context_identifier: Optional[str] | Omit = omit, enabled: bool | Omit = omit, - guardrailed_fallback_message: Optional[str] | Omit = omit, + guardrailed_fallback: Optional[eval_create_params.GuardrailedFallback] | Omit = omit, is_default: bool | Omit = omit, priority: Optional[int] | Omit = omit, query_identifier: Optional[str] | Omit = omit, @@ -87,8 +87,7 @@ def create( enabled: Allows the evaluation to be disabled without removing it - guardrailed_fallback_message: Fallback message to use if this eval fails and causes the response to be - guardrailed + guardrailed_fallback: message, priority, type is_default: Whether the eval is a default, built-in eval or a custom eval @@ -128,7 +127,7 @@ def create( "name": name, "context_identifier": context_identifier, "enabled": enabled, - "guardrailed_fallback_message": guardrailed_fallback_message, + "guardrailed_fallback": guardrailed_fallback, "is_default": is_default, "priority": priority, "query_identifier": query_identifier, @@ -157,7 +156,8 @@ def update( name: str, context_identifier: Optional[str] | Omit = omit, enabled: bool | Omit = omit, - guardrailed_fallback_message: Optional[str] | Omit = omit, + guardrailed_fallback: Optional[eval_update_params.CustomEvalCreateOrUpdateSchemaGuardrailedFallback] + | Omit = omit, is_default: bool | Omit = omit, priority: Optional[int] | Omit = omit, query_identifier: Optional[str] | Omit = omit, @@ -190,8 +190,7 @@ def update( enabled: Allows the evaluation to be disabled without removing it - guardrailed_fallback_message: Fallback message to use if this eval fails and 
causes the response to be - guardrailed + guardrailed_fallback: message, priority, type is_default: Whether the eval is a default, built-in eval or a custom eval @@ -230,7 +229,7 @@ def update( project_id: str, body_eval_key: str, enabled: bool | Omit = omit, - guardrailed_fallback_message: Optional[str] | Omit = omit, + guardrailed_fallback: Optional[eval_update_params.DefaultEvalUpdateSchemaGuardrailedFallback] | Omit = omit, priority: Optional[int] | Omit = omit, should_escalate: bool | Omit = omit, should_guardrail: bool | Omit = omit, @@ -252,8 +251,7 @@ def update( enabled: Allows the evaluation to be disabled without removing it - guardrailed_fallback_message: Fallback message to use if this eval fails and causes the response to be - guardrailed + guardrailed_fallback: message, priority, type priority: Priority order for evals (lower number = higher priority) to determine primary eval issue to surface @@ -288,7 +286,9 @@ def update( name: str | Omit = omit, context_identifier: Optional[str] | Omit = omit, enabled: bool | Omit = omit, - guardrailed_fallback_message: Optional[str] | Omit = omit, + guardrailed_fallback: Optional[eval_update_params.CustomEvalCreateOrUpdateSchemaGuardrailedFallback] + | Optional[eval_update_params.DefaultEvalUpdateSchemaGuardrailedFallback] + | Omit = omit, is_default: bool | Omit = omit, priority: Optional[int] | Omit = omit, query_identifier: Optional[str] | Omit = omit, @@ -317,7 +317,7 @@ def update( "name": name, "context_identifier": context_identifier, "enabled": enabled, - "guardrailed_fallback_message": guardrailed_fallback_message, + "guardrailed_fallback": guardrailed_fallback, "is_default": is_default, "priority": priority, "query_identifier": query_identifier, @@ -448,7 +448,7 @@ async def create( name: str, context_identifier: Optional[str] | Omit = omit, enabled: bool | Omit = omit, - guardrailed_fallback_message: Optional[str] | Omit = omit, + guardrailed_fallback: Optional[eval_create_params.GuardrailedFallback] | Omit = omit, is_default: bool | Omit = omit, priority: Optional[int] | Omit = omit, query_identifier: Optional[str] | Omit = omit, @@ -481,8 +481,7 @@ async def create( enabled: Allows the evaluation to be disabled without removing it - guardrailed_fallback_message: Fallback message to use if this eval fails and causes the response to be - guardrailed + guardrailed_fallback: message, priority, type is_default: Whether the eval is a default, built-in eval or a custom eval @@ -522,7 +521,7 @@ async def create( "name": name, "context_identifier": context_identifier, "enabled": enabled, - "guardrailed_fallback_message": guardrailed_fallback_message, + "guardrailed_fallback": guardrailed_fallback, "is_default": is_default, "priority": priority, "query_identifier": query_identifier, @@ -551,7 +550,8 @@ async def update( name: str, context_identifier: Optional[str] | Omit = omit, enabled: bool | Omit = omit, - guardrailed_fallback_message: Optional[str] | Omit = omit, + guardrailed_fallback: Optional[eval_update_params.CustomEvalCreateOrUpdateSchemaGuardrailedFallback] + | Omit = omit, is_default: bool | Omit = omit, priority: Optional[int] | Omit = omit, query_identifier: Optional[str] | Omit = omit, @@ -584,8 +584,7 @@ async def update( enabled: Allows the evaluation to be disabled without removing it - guardrailed_fallback_message: Fallback message to use if this eval fails and causes the response to be - guardrailed + guardrailed_fallback: message, priority, type is_default: Whether the eval is a default, built-in eval or a custom 
eval @@ -624,7 +623,7 @@ async def update( project_id: str, body_eval_key: str, enabled: bool | Omit = omit, - guardrailed_fallback_message: Optional[str] | Omit = omit, + guardrailed_fallback: Optional[eval_update_params.DefaultEvalUpdateSchemaGuardrailedFallback] | Omit = omit, priority: Optional[int] | Omit = omit, should_escalate: bool | Omit = omit, should_guardrail: bool | Omit = omit, @@ -646,8 +645,7 @@ async def update( enabled: Allows the evaluation to be disabled without removing it - guardrailed_fallback_message: Fallback message to use if this eval fails and causes the response to be - guardrailed + guardrailed_fallback: message, priority, type priority: Priority order for evals (lower number = higher priority) to determine primary eval issue to surface @@ -682,7 +680,9 @@ async def update( name: str | Omit = omit, context_identifier: Optional[str] | Omit = omit, enabled: bool | Omit = omit, - guardrailed_fallback_message: Optional[str] | Omit = omit, + guardrailed_fallback: Optional[eval_update_params.CustomEvalCreateOrUpdateSchemaGuardrailedFallback] + | Optional[eval_update_params.DefaultEvalUpdateSchemaGuardrailedFallback] + | Omit = omit, is_default: bool | Omit = omit, priority: Optional[int] | Omit = omit, query_identifier: Optional[str] | Omit = omit, @@ -711,7 +711,7 @@ async def update( "name": name, "context_identifier": context_identifier, "enabled": enabled, - "guardrailed_fallback_message": guardrailed_fallback_message, + "guardrailed_fallback": guardrailed_fallback, "is_default": is_default, "priority": priority, "query_identifier": query_identifier, diff --git a/src/codex/types/project_create_params.py b/src/codex/types/project_create_params.py index bd14d3dd..4704f638 100644 --- a/src/codex/types/project_create_params.py +++ b/src/codex/types/project_create_params.py @@ -11,12 +11,18 @@ "ConfigEvalConfig", "ConfigEvalConfigCustomEvals", "ConfigEvalConfigCustomEvalsEvals", + "ConfigEvalConfigCustomEvalsEvalsGuardrailedFallback", "ConfigEvalConfigDefaultEvals", "ConfigEvalConfigDefaultEvalsContextSufficiency", + "ConfigEvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback", "ConfigEvalConfigDefaultEvalsQueryEase", + "ConfigEvalConfigDefaultEvalsQueryEaseGuardrailedFallback", "ConfigEvalConfigDefaultEvalsResponseGroundedness", + "ConfigEvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback", "ConfigEvalConfigDefaultEvalsResponseHelpfulness", + "ConfigEvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback", "ConfigEvalConfigDefaultEvalsTrustworthiness", + "ConfigEvalConfigDefaultEvalsTrustworthinessGuardrailedFallback", ] @@ -32,6 +38,23 @@ class ProjectCreateParams(TypedDict, total=False): description: Optional[str] +class ConfigEvalConfigCustomEvalsEvalsGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigCustomEvalsEvals(TypedDict, total=False): criteria: Required[str] """ @@ -57,11 +80,8 @@ class ConfigEvalConfigCustomEvalsEvals(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if 
this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigCustomEvalsEvalsGuardrailedFallback] + """message, priority, type""" is_default: bool """Whether the eval is a default, built-in eval or a custom eval""" @@ -103,6 +123,23 @@ class ConfigEvalConfigCustomEvals(TypedDict, total=False): evals: Dict[str, ConfigEvalConfigCustomEvalsEvals] +class ConfigEvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsContextSufficiency(TypedDict, total=False): eval_key: Required[str] """ @@ -116,11 +153,8 @@ class ConfigEvalConfigDefaultEvalsContextSufficiency(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback] + """message, priority, type""" priority: Optional[int] """ @@ -144,6 +178,23 @@ class ConfigEvalConfigDefaultEvalsContextSufficiency(TypedDict, total=False): """Whether the evaluation fails when score is above or below the threshold""" +class ConfigEvalConfigDefaultEvalsQueryEaseGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsQueryEase(TypedDict, total=False): eval_key: Required[str] """ @@ -157,11 +208,8 @@ class ConfigEvalConfigDefaultEvalsQueryEase(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsQueryEaseGuardrailedFallback] + """message, priority, type""" priority: Optional[int] """ @@ -185,6 +233,23 @@ class ConfigEvalConfigDefaultEvalsQueryEase(TypedDict, total=False): """Whether the evaluation fails when score is above or below the threshold""" +class ConfigEvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsResponseGroundedness(TypedDict, total=False): 
eval_key: Required[str] """ @@ -198,11 +263,8 @@ class ConfigEvalConfigDefaultEvalsResponseGroundedness(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback] + """message, priority, type""" priority: Optional[int] """ @@ -226,6 +288,23 @@ class ConfigEvalConfigDefaultEvalsResponseGroundedness(TypedDict, total=False): """Whether the evaluation fails when score is above or below the threshold""" +class ConfigEvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsResponseHelpfulness(TypedDict, total=False): eval_key: Required[str] """ @@ -239,11 +318,8 @@ class ConfigEvalConfigDefaultEvalsResponseHelpfulness(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback] + """message, priority, type""" priority: Optional[int] """ @@ -267,6 +343,23 @@ class ConfigEvalConfigDefaultEvalsResponseHelpfulness(TypedDict, total=False): """Whether the evaluation fails when score is above or below the threshold""" +class ConfigEvalConfigDefaultEvalsTrustworthinessGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsTrustworthiness(TypedDict, total=False): eval_key: Required[str] """ @@ -280,11 +373,8 @@ class ConfigEvalConfigDefaultEvalsTrustworthiness(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsTrustworthinessGuardrailedFallback] + """message, priority, type""" priority: Optional[int] """ diff --git a/src/codex/types/project_detect_params.py b/src/codex/types/project_detect_params.py index f29d3e00..8e93971b 100644 --- a/src/codex/types/project_detect_params.py +++ b/src/codex/types/project_detect_params.py @@ -31,12 +31,18 @@ "EvalConfig", "EvalConfigCustomEvals", "EvalConfigCustomEvalsEvals", + "EvalConfigCustomEvalsEvalsGuardrailedFallback", "EvalConfigDefaultEvals", "EvalConfigDefaultEvalsContextSufficiency", + 
"EvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback", "EvalConfigDefaultEvalsQueryEase", + "EvalConfigDefaultEvalsQueryEaseGuardrailedFallback", "EvalConfigDefaultEvalsResponseGroundedness", + "EvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback", "EvalConfigDefaultEvalsResponseHelpfulness", + "EvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback", "EvalConfigDefaultEvalsTrustworthiness", + "EvalConfigDefaultEvalsTrustworthinessGuardrailedFallback", "Message", "MessageChatCompletionAssistantMessageParamInput", "MessageChatCompletionAssistantMessageParamInputAudio", @@ -433,6 +439,23 @@ class ResponseChatCompletionTyped(TypedDict, total=False): Response: TypeAlias = Union[str, ResponseChatCompletion] +class EvalConfigCustomEvalsEvalsGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class EvalConfigCustomEvalsEvals(TypedDict, total=False): criteria: Required[str] """ @@ -458,11 +481,8 @@ class EvalConfigCustomEvalsEvals(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[EvalConfigCustomEvalsEvalsGuardrailedFallback] + """message, priority, type""" is_default: bool """Whether the eval is a default, built-in eval or a custom eval""" @@ -504,6 +524,23 @@ class EvalConfigCustomEvals(TypedDict, total=False): evals: Dict[str, EvalConfigCustomEvalsEvals] +class EvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class EvalConfigDefaultEvalsContextSufficiency(TypedDict, total=False): eval_key: Required[str] """ @@ -517,11 +554,8 @@ class EvalConfigDefaultEvalsContextSufficiency(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[EvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback] + """message, priority, type""" priority: Optional[int] """ @@ -545,6 +579,23 @@ class EvalConfigDefaultEvalsContextSufficiency(TypedDict, total=False): """Whether the evaluation fails when score is above or below the threshold""" +class EvalConfigDefaultEvalsQueryEaseGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use 
if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class EvalConfigDefaultEvalsQueryEase(TypedDict, total=False): eval_key: Required[str] """ @@ -558,11 +609,8 @@ class EvalConfigDefaultEvalsQueryEase(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[EvalConfigDefaultEvalsQueryEaseGuardrailedFallback] + """message, priority, type""" priority: Optional[int] """ @@ -586,6 +634,23 @@ class EvalConfigDefaultEvalsQueryEase(TypedDict, total=False): """Whether the evaluation fails when score is above or below the threshold""" +class EvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class EvalConfigDefaultEvalsResponseGroundedness(TypedDict, total=False): eval_key: Required[str] """ @@ -599,11 +664,8 @@ class EvalConfigDefaultEvalsResponseGroundedness(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[EvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback] + """message, priority, type""" priority: Optional[int] """ @@ -627,6 +689,23 @@ class EvalConfigDefaultEvalsResponseGroundedness(TypedDict, total=False): """Whether the evaluation fails when score is above or below the threshold""" +class EvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class EvalConfigDefaultEvalsResponseHelpfulness(TypedDict, total=False): eval_key: Required[str] """ @@ -640,11 +719,8 @@ class EvalConfigDefaultEvalsResponseHelpfulness(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[EvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback] + """message, priority, type""" priority: Optional[int] """ @@ -668,6 +744,23 @@ class EvalConfigDefaultEvalsResponseHelpfulness(TypedDict, total=False): """Whether the evaluation fails when score is above or below the threshold""" +class EvalConfigDefaultEvalsTrustworthinessGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to 
use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class EvalConfigDefaultEvalsTrustworthiness(TypedDict, total=False): eval_key: Required[str] """ @@ -681,11 +774,8 @@ class EvalConfigDefaultEvalsTrustworthiness(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[EvalConfigDefaultEvalsTrustworthinessGuardrailedFallback] + """message, priority, type""" priority: Optional[int] """ diff --git a/src/codex/types/project_detect_response.py b/src/codex/types/project_detect_response.py index 4e4b74f7..df03c864 100644 --- a/src/codex/types/project_detect_response.py +++ b/src/codex/types/project_detect_response.py @@ -1,14 +1,32 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from typing import Dict, Optional +from typing_extensions import Literal from .._models import BaseModel -__all__ = ["ProjectDetectResponse", "EvalScores"] +__all__ = ["ProjectDetectResponse", "EvalScores", "EvalScoresGuardrailedFallback", "GuardrailedFallback"] + + +class EvalScoresGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" class EvalScores(BaseModel): - guardrailed_fallback_message: Optional[str] = None + guardrailed_fallback: Optional[EvalScoresGuardrailedFallback] = None score: Optional[float] = None @@ -21,6 +39,26 @@ class EvalScores(BaseModel): log: Optional[object] = None +class GuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + guardrail_name: Optional[str] = None + """Name of the guardrail that triggered the fallback""" + + class ProjectDetectResponse(BaseModel): escalated_to_sme: bool """ @@ -47,6 +85,12 @@ class ProjectDetectResponse(BaseModel): expert review. Expert review will override the original guardrail decision. 
""" + guardrailed_fallback: Optional[GuardrailedFallback] = None + """ + Name, fallback message, fallback priority, and fallback type of the triggered + guardrail with the highest fallback priority + """ + should_guardrail: bool """ True if the response should be guardrailed by the AI system, False if the diff --git a/src/codex/types/project_list_response.py b/src/codex/types/project_list_response.py index 66d2037f..e4ce5585 100644 --- a/src/codex/types/project_list_response.py +++ b/src/codex/types/project_list_response.py @@ -13,16 +13,39 @@ "ProjectConfigEvalConfig", "ProjectConfigEvalConfigCustomEvals", "ProjectConfigEvalConfigCustomEvalsEvals", + "ProjectConfigEvalConfigCustomEvalsEvalsGuardrailedFallback", "ProjectConfigEvalConfigDefaultEvals", "ProjectConfigEvalConfigDefaultEvalsContextSufficiency", + "ProjectConfigEvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback", "ProjectConfigEvalConfigDefaultEvalsQueryEase", + "ProjectConfigEvalConfigDefaultEvalsQueryEaseGuardrailedFallback", "ProjectConfigEvalConfigDefaultEvalsResponseGroundedness", + "ProjectConfigEvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback", "ProjectConfigEvalConfigDefaultEvalsResponseHelpfulness", + "ProjectConfigEvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback", "ProjectConfigEvalConfigDefaultEvalsTrustworthiness", + "ProjectConfigEvalConfigDefaultEvalsTrustworthinessGuardrailedFallback", "Filters", ] +class ProjectConfigEvalConfigCustomEvalsEvalsGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ProjectConfigEvalConfigCustomEvalsEvals(BaseModel): criteria: str """ @@ -54,11 +77,8 @@ class ProjectConfigEvalConfigCustomEvalsEvals(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ProjectConfigEvalConfigCustomEvalsEvalsGuardrailedFallback] = None + """message, priority, type""" is_default: Optional[bool] = None """Whether the eval is a default, built-in eval or a custom eval""" @@ -100,6 +120,23 @@ class ProjectConfigEvalConfigCustomEvals(BaseModel): evals: Optional[Dict[str, ProjectConfigEvalConfigCustomEvalsEvals]] = None +class ProjectConfigEvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ProjectConfigEvalConfigDefaultEvalsContextSufficiency(BaseModel): display_name: str """Human-friendly name for display. 
@@ -119,11 +156,8 @@ class ProjectConfigEvalConfigDefaultEvalsContextSufficiency(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ProjectConfigEvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback] = None + """message, priority, type""" priority: Optional[int] = None """ @@ -147,6 +181,23 @@ class ProjectConfigEvalConfigDefaultEvalsContextSufficiency(BaseModel): """Whether the evaluation fails when score is above or below the threshold""" +class ProjectConfigEvalConfigDefaultEvalsQueryEaseGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ProjectConfigEvalConfigDefaultEvalsQueryEase(BaseModel): display_name: str """Human-friendly name for display. @@ -166,11 +217,8 @@ class ProjectConfigEvalConfigDefaultEvalsQueryEase(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ProjectConfigEvalConfigDefaultEvalsQueryEaseGuardrailedFallback] = None + """message, priority, type""" priority: Optional[int] = None """ @@ -194,6 +242,23 @@ class ProjectConfigEvalConfigDefaultEvalsQueryEase(BaseModel): """Whether the evaluation fails when score is above or below the threshold""" +class ProjectConfigEvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ProjectConfigEvalConfigDefaultEvalsResponseGroundedness(BaseModel): display_name: str """Human-friendly name for display. 
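Each GuardrailedFallback variant in these hunks documents the same rule for priority: when several guardrails trigger, the lowest number wins. A self-contained illustration of that selection rule over plain dicts shaped like the models above (the data is made up):

# Lower priority number = higher priority, per the docstrings above.
triggered = [
    {"message": "Please contact support.", "priority": 2, "type": "ai_guidance"},
    {"message": "Here is the vetted expert answer.", "priority": 1, "type": "expert_answer"},
]
winner = min(triggered, key=lambda fb: fb["priority"])
assert winner["type"] == "expert_answer"  # priority 1 beats priority 2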
@@ -213,11 +278,8 @@ class ProjectConfigEvalConfigDefaultEvalsResponseGroundedness(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ProjectConfigEvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback] = None + """message, priority, type""" priority: Optional[int] = None """ @@ -241,6 +303,23 @@ class ProjectConfigEvalConfigDefaultEvalsResponseGroundedness(BaseModel): """Whether the evaluation fails when score is above or below the threshold""" +class ProjectConfigEvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ProjectConfigEvalConfigDefaultEvalsResponseHelpfulness(BaseModel): display_name: str """Human-friendly name for display. @@ -260,11 +339,8 @@ class ProjectConfigEvalConfigDefaultEvalsResponseHelpfulness(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ProjectConfigEvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback] = None + """message, priority, type""" priority: Optional[int] = None """ @@ -288,6 +364,23 @@ class ProjectConfigEvalConfigDefaultEvalsResponseHelpfulness(BaseModel): """Whether the evaluation fails when score is above or below the threshold""" +class ProjectConfigEvalConfigDefaultEvalsTrustworthinessGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ProjectConfigEvalConfigDefaultEvalsTrustworthiness(BaseModel): display_name: str """Human-friendly name for display. 
@@ -307,11 +400,8 @@ class ProjectConfigEvalConfigDefaultEvalsTrustworthiness(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ProjectConfigEvalConfigDefaultEvalsTrustworthinessGuardrailedFallback] = None + """message, priority, type""" priority: Optional[int] = None """ diff --git a/src/codex/types/project_retrieve_response.py b/src/codex/types/project_retrieve_response.py index 694f590b..8fe77415 100644 --- a/src/codex/types/project_retrieve_response.py +++ b/src/codex/types/project_retrieve_response.py @@ -12,15 +12,38 @@ "ConfigEvalConfig", "ConfigEvalConfigCustomEvals", "ConfigEvalConfigCustomEvalsEvals", + "ConfigEvalConfigCustomEvalsEvalsGuardrailedFallback", "ConfigEvalConfigDefaultEvals", "ConfigEvalConfigDefaultEvalsContextSufficiency", + "ConfigEvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback", "ConfigEvalConfigDefaultEvalsQueryEase", + "ConfigEvalConfigDefaultEvalsQueryEaseGuardrailedFallback", "ConfigEvalConfigDefaultEvalsResponseGroundedness", + "ConfigEvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback", "ConfigEvalConfigDefaultEvalsResponseHelpfulness", + "ConfigEvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback", "ConfigEvalConfigDefaultEvalsTrustworthiness", + "ConfigEvalConfigDefaultEvalsTrustworthinessGuardrailedFallback", ] +class ConfigEvalConfigCustomEvalsEvalsGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigCustomEvalsEvals(BaseModel): criteria: str """ @@ -52,11 +75,8 @@ class ConfigEvalConfigCustomEvalsEvals(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigCustomEvalsEvalsGuardrailedFallback] = None + """message, priority, type""" is_default: Optional[bool] = None """Whether the eval is a default, built-in eval or a custom eval""" @@ -98,6 +118,23 @@ class ConfigEvalConfigCustomEvals(BaseModel): evals: Optional[Dict[str, ConfigEvalConfigCustomEvalsEvals]] = None +class ConfigEvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsContextSufficiency(BaseModel): display_name: str """Human-friendly name for display. 
@@ -117,11 +154,8 @@ class ConfigEvalConfigDefaultEvalsContextSufficiency(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback] = None + """message, priority, type""" priority: Optional[int] = None """ @@ -145,6 +179,23 @@ class ConfigEvalConfigDefaultEvalsContextSufficiency(BaseModel): """Whether the evaluation fails when score is above or below the threshold""" +class ConfigEvalConfigDefaultEvalsQueryEaseGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsQueryEase(BaseModel): display_name: str """Human-friendly name for display. @@ -164,11 +215,8 @@ class ConfigEvalConfigDefaultEvalsQueryEase(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsQueryEaseGuardrailedFallback] = None + """message, priority, type""" priority: Optional[int] = None """ @@ -192,6 +240,23 @@ class ConfigEvalConfigDefaultEvalsQueryEase(BaseModel): """Whether the evaluation fails when score is above or below the threshold""" +class ConfigEvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsResponseGroundedness(BaseModel): display_name: str """Human-friendly name for display. 
@@ -211,11 +276,8 @@ class ConfigEvalConfigDefaultEvalsResponseGroundedness(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback] = None + """message, priority, type""" priority: Optional[int] = None """ @@ -239,6 +301,23 @@ class ConfigEvalConfigDefaultEvalsResponseGroundedness(BaseModel): """Whether the evaluation fails when score is above or below the threshold""" +class ConfigEvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsResponseHelpfulness(BaseModel): display_name: str """Human-friendly name for display. @@ -258,11 +337,8 @@ class ConfigEvalConfigDefaultEvalsResponseHelpfulness(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback] = None + """message, priority, type""" priority: Optional[int] = None """ @@ -286,6 +362,23 @@ class ConfigEvalConfigDefaultEvalsResponseHelpfulness(BaseModel): """Whether the evaluation fails when score is above or below the threshold""" +class ConfigEvalConfigDefaultEvalsTrustworthinessGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsTrustworthiness(BaseModel): display_name: str """Human-friendly name for display. 
@@ -305,11 +398,8 @@ class ConfigEvalConfigDefaultEvalsTrustworthiness(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsTrustworthinessGuardrailedFallback] = None + """message, priority, type""" priority: Optional[int] = None """ diff --git a/src/codex/types/project_return_schema.py b/src/codex/types/project_return_schema.py index b8b54044..423d0ce2 100644 --- a/src/codex/types/project_return_schema.py +++ b/src/codex/types/project_return_schema.py @@ -12,15 +12,38 @@ "ConfigEvalConfig", "ConfigEvalConfigCustomEvals", "ConfigEvalConfigCustomEvalsEvals", + "ConfigEvalConfigCustomEvalsEvalsGuardrailedFallback", "ConfigEvalConfigDefaultEvals", "ConfigEvalConfigDefaultEvalsContextSufficiency", + "ConfigEvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback", "ConfigEvalConfigDefaultEvalsQueryEase", + "ConfigEvalConfigDefaultEvalsQueryEaseGuardrailedFallback", "ConfigEvalConfigDefaultEvalsResponseGroundedness", + "ConfigEvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback", "ConfigEvalConfigDefaultEvalsResponseHelpfulness", + "ConfigEvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback", "ConfigEvalConfigDefaultEvalsTrustworthiness", + "ConfigEvalConfigDefaultEvalsTrustworthinessGuardrailedFallback", ] +class ConfigEvalConfigCustomEvalsEvalsGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigCustomEvalsEvals(BaseModel): criteria: str """ @@ -52,11 +75,8 @@ class ConfigEvalConfigCustomEvalsEvals(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigCustomEvalsEvalsGuardrailedFallback] = None + """message, priority, type""" is_default: Optional[bool] = None """Whether the eval is a default, built-in eval or a custom eval""" @@ -98,6 +118,23 @@ class ConfigEvalConfigCustomEvals(BaseModel): evals: Optional[Dict[str, ConfigEvalConfigCustomEvalsEvals]] = None +class ConfigEvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsContextSufficiency(BaseModel): display_name: str """Human-friendly name for display. 
@@ -117,11 +154,8 @@ class ConfigEvalConfigDefaultEvalsContextSufficiency(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback] = None + """message, priority, type""" priority: Optional[int] = None """ @@ -145,6 +179,23 @@ class ConfigEvalConfigDefaultEvalsContextSufficiency(BaseModel): """Whether the evaluation fails when score is above or below the threshold""" +class ConfigEvalConfigDefaultEvalsQueryEaseGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsQueryEase(BaseModel): display_name: str """Human-friendly name for display. @@ -164,11 +215,8 @@ class ConfigEvalConfigDefaultEvalsQueryEase(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsQueryEaseGuardrailedFallback] = None + """message, priority, type""" priority: Optional[int] = None """ @@ -192,6 +240,23 @@ class ConfigEvalConfigDefaultEvalsQueryEase(BaseModel): """Whether the evaluation fails when score is above or below the threshold""" +class ConfigEvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsResponseGroundedness(BaseModel): display_name: str """Human-friendly name for display. 
@@ -211,11 +276,8 @@ class ConfigEvalConfigDefaultEvalsResponseGroundedness(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback] = None + """message, priority, type""" priority: Optional[int] = None """ @@ -239,6 +301,23 @@ class ConfigEvalConfigDefaultEvalsResponseGroundedness(BaseModel): """Whether the evaluation fails when score is above or below the threshold""" +class ConfigEvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsResponseHelpfulness(BaseModel): display_name: str """Human-friendly name for display. @@ -258,11 +337,8 @@ class ConfigEvalConfigDefaultEvalsResponseHelpfulness(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback] = None + """message, priority, type""" priority: Optional[int] = None """ @@ -286,6 +362,23 @@ class ConfigEvalConfigDefaultEvalsResponseHelpfulness(BaseModel): """Whether the evaluation fails when score is above or below the threshold""" +class ConfigEvalConfigDefaultEvalsTrustworthinessGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsTrustworthiness(BaseModel): display_name: str """Human-friendly name for display. 
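Because guardrailed_fallback_message: Optional[str] is removed from every response model in this series in favor of the nested object, read sites need a small migration. A sketch, assuming cfg is any of the eval-config models above (both the old field and the new one are optional):

# Before this change (field now removed):
#     text = cfg.guardrailed_fallback_message
# After: the message lives on the nested object, which may itself be None.
text = cfg.guardrailed_fallback.message if cfg.guardrailed_fallback else None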
@@ -305,11 +398,8 @@ class ConfigEvalConfigDefaultEvalsTrustworthiness(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsTrustworthinessGuardrailedFallback] = None + """message, priority, type""" priority: Optional[int] = None """ diff --git a/src/codex/types/project_update_params.py b/src/codex/types/project_update_params.py index 4ca5abfc..3557c2d0 100644 --- a/src/codex/types/project_update_params.py +++ b/src/codex/types/project_update_params.py @@ -11,12 +11,18 @@ "ConfigEvalConfig", "ConfigEvalConfigCustomEvals", "ConfigEvalConfigCustomEvalsEvals", + "ConfigEvalConfigCustomEvalsEvalsGuardrailedFallback", "ConfigEvalConfigDefaultEvals", "ConfigEvalConfigDefaultEvalsContextSufficiency", + "ConfigEvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback", "ConfigEvalConfigDefaultEvalsQueryEase", + "ConfigEvalConfigDefaultEvalsQueryEaseGuardrailedFallback", "ConfigEvalConfigDefaultEvalsResponseGroundedness", + "ConfigEvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback", "ConfigEvalConfigDefaultEvalsResponseHelpfulness", + "ConfigEvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback", "ConfigEvalConfigDefaultEvalsTrustworthiness", + "ConfigEvalConfigDefaultEvalsTrustworthinessGuardrailedFallback", ] @@ -30,6 +36,23 @@ class ProjectUpdateParams(TypedDict, total=False): name: Optional[str] +class ConfigEvalConfigCustomEvalsEvalsGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigCustomEvalsEvals(TypedDict, total=False): criteria: Required[str] """ @@ -55,11 +78,8 @@ class ConfigEvalConfigCustomEvalsEvals(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigCustomEvalsEvalsGuardrailedFallback] + """message, priority, type""" is_default: bool """Whether the eval is a default, built-in eval or a custom eval""" @@ -101,6 +121,23 @@ class ConfigEvalConfigCustomEvals(TypedDict, total=False): evals: Dict[str, ConfigEvalConfigCustomEvalsEvals] +class ConfigEvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsContextSufficiency(TypedDict, total=False): eval_key: Required[str] """ @@ -114,11 +151,8 @@ class 
ConfigEvalConfigDefaultEvalsContextSufficiency(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsContextSufficiencyGuardrailedFallback] + """message, priority, type""" priority: Optional[int] """ @@ -142,6 +176,23 @@ class ConfigEvalConfigDefaultEvalsContextSufficiency(TypedDict, total=False): """Whether the evaluation fails when score is above or below the threshold""" +class ConfigEvalConfigDefaultEvalsQueryEaseGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsQueryEase(TypedDict, total=False): eval_key: Required[str] """ @@ -155,11 +206,8 @@ class ConfigEvalConfigDefaultEvalsQueryEase(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsQueryEaseGuardrailedFallback] + """message, priority, type""" priority: Optional[int] """ @@ -183,6 +231,23 @@ class ConfigEvalConfigDefaultEvalsQueryEase(TypedDict, total=False): """Whether the evaluation fails when score is above or below the threshold""" +class ConfigEvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsResponseGroundedness(TypedDict, total=False): eval_key: Required[str] """ @@ -196,11 +261,8 @@ class ConfigEvalConfigDefaultEvalsResponseGroundedness(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsResponseGroundednessGuardrailedFallback] + """message, priority, type""" priority: Optional[int] """ @@ -224,6 +286,23 @@ class ConfigEvalConfigDefaultEvalsResponseGroundedness(TypedDict, total=False): """Whether the evaluation fails when score is above or below the threshold""" +class ConfigEvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple 
guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsResponseHelpfulness(TypedDict, total=False): eval_key: Required[str] """ @@ -237,11 +316,8 @@ class ConfigEvalConfigDefaultEvalsResponseHelpfulness(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsResponseHelpfulnessGuardrailedFallback] + """message, priority, type""" priority: Optional[int] """ @@ -265,6 +341,23 @@ class ConfigEvalConfigDefaultEvalsResponseHelpfulness(TypedDict, total=False): """Whether the evaluation fails when score is above or below the threshold""" +class ConfigEvalConfigDefaultEvalsTrustworthinessGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class ConfigEvalConfigDefaultEvalsTrustworthiness(TypedDict, total=False): eval_key: Required[str] """ @@ -278,11 +371,8 @@ class ConfigEvalConfigDefaultEvalsTrustworthiness(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[ConfigEvalConfigDefaultEvalsTrustworthinessGuardrailedFallback] + """message, priority, type""" priority: Optional[int] """ diff --git a/src/codex/types/project_validate_response.py b/src/codex/types/project_validate_response.py index 458e4fc5..b9166c20 100644 --- a/src/codex/types/project_validate_response.py +++ b/src/codex/types/project_validate_response.py @@ -1,10 +1,35 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
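On the request side the fallback now travels as a nested mapping matching the GuardrailedFallback TypedDicts above. A sketch of a project update, assuming a client.projects.update(...) method, with the nested key names inferred from the generated type names; only the three-key guardrailed_fallback shape comes from this diff, and the eval create/update endpoints later in the patch accept the same object:

from codex import Codex

client = Codex()  # assumes credentials from the environment

# Key names are inferred from the generated TypedDict names; treat everything
# except the guardrailed_fallback payload as illustrative.
client.projects.update(
    project_id="proj_123",  # placeholder ID
    config={
        "eval_config": {
            "default_evals": {
                "trustworthiness": {
                    "eval_key": "trustworthiness",
                    "guardrailed_fallback": {
                        "message": "I can't answer that reliably yet; routing you to a human.",
                        "priority": 1,
                        "type": "ai_guidance",
                    },
                },
            },
        },
    },
)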
from typing import Dict, List, Optional +from typing_extensions import Literal from .._models import BaseModel -__all__ = ["ProjectValidateResponse", "DeterministicGuardrailsResults", "EvalScores"] +__all__ = [ + "ProjectValidateResponse", + "DeterministicGuardrailsResults", + "DeterministicGuardrailsResultsGuardrailedFallback", + "EvalScores", + "EvalScoresGuardrailedFallback", + "GuardrailedFallback", +] + + +class DeterministicGuardrailsResultsGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" class DeterministicGuardrailsResults(BaseModel): @@ -12,13 +37,30 @@ class DeterministicGuardrailsResults(BaseModel): should_guardrail: bool - fallback_message: Optional[str] = None + guardrailed_fallback: Optional[DeterministicGuardrailsResultsGuardrailedFallback] = None matches: Optional[List[str]] = None +class EvalScoresGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class EvalScores(BaseModel): - guardrailed_fallback_message: Optional[str] = None + guardrailed_fallback: Optional[EvalScoresGuardrailedFallback] = None score: Optional[float] = None @@ -31,6 +73,26 @@ class EvalScores(BaseModel): log: Optional[object] = None +class GuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + guardrail_name: Optional[str] = None + """Name of the guardrail that triggered the fallback""" + + class ProjectValidateResponse(BaseModel): deterministic_guardrails_results: Optional[Dict[str, DeterministicGuardrailsResults]] = None """Results from deterministic guardrails applied to the response.""" @@ -60,6 +122,12 @@ class ProjectValidateResponse(BaseModel): expert review. Expert review will override the original guardrail decision. 
""" + guardrailed_fallback: Optional[GuardrailedFallback] = None + """ + Name, fallback message, fallback priority, and fallback type of the triggered + guardrail with the highest fallback priority + """ + log_id: str """The UUID of the query log entry created for this validation request.""" diff --git a/src/codex/types/projects/eval_create_params.py b/src/codex/types/projects/eval_create_params.py index 5f66f6ad..d4ec41e6 100644 --- a/src/codex/types/projects/eval_create_params.py +++ b/src/codex/types/projects/eval_create_params.py @@ -5,7 +5,7 @@ from typing import Optional from typing_extensions import Literal, Required, TypedDict -__all__ = ["EvalCreateParams"] +__all__ = ["EvalCreateParams", "GuardrailedFallback"] class EvalCreateParams(TypedDict, total=False): @@ -33,11 +33,8 @@ class EvalCreateParams(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[GuardrailedFallback] + """message, priority, type""" is_default: bool """Whether the eval is a default, built-in eval or a custom eval""" @@ -73,3 +70,20 @@ class EvalCreateParams(TypedDict, total=False): threshold_direction: Literal["above", "below"] """Whether the evaluation fails when score is above or below the threshold""" + + +class GuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" diff --git a/src/codex/types/projects/eval_list_response.py b/src/codex/types/projects/eval_list_response.py index 572de97b..2aa0d755 100644 --- a/src/codex/types/projects/eval_list_response.py +++ b/src/codex/types/projects/eval_list_response.py @@ -5,7 +5,24 @@ from ..._models import BaseModel -__all__ = ["EvalListResponse", "Eval"] +__all__ = ["EvalListResponse", "Eval", "EvalGuardrailedFallback"] + + +class EvalGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" class Eval(BaseModel): @@ -39,11 +56,8 @@ class Eval(BaseModel): enabled: Optional[bool] = None """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] = None - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[EvalGuardrailedFallback] = None + """message, priority, type""" is_default: Optional[bool] = None """Whether the eval is a default, built-in eval or a custom eval""" diff --git a/src/codex/types/projects/eval_update_params.py b/src/codex/types/projects/eval_update_params.py index 1cfa8360..7da4e1ee 100644 --- a/src/codex/types/projects/eval_update_params.py +++ b/src/codex/types/projects/eval_update_params.py @@ -7,7 +7,13 @@ from ..._utils import 
PropertyInfo -__all__ = ["EvalUpdateParams", "CustomEvalCreateOrUpdateSchema", "DefaultEvalUpdateSchema"] +__all__ = [ + "EvalUpdateParams", + "CustomEvalCreateOrUpdateSchema", + "CustomEvalCreateOrUpdateSchemaGuardrailedFallback", + "DefaultEvalUpdateSchema", + "DefaultEvalUpdateSchemaGuardrailedFallback", +] class CustomEvalCreateOrUpdateSchema(TypedDict, total=False): @@ -37,11 +43,8 @@ class CustomEvalCreateOrUpdateSchema(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[CustomEvalCreateOrUpdateSchemaGuardrailedFallback] + """message, priority, type""" is_default: bool """Whether the eval is a default, built-in eval or a custom eval""" @@ -79,6 +82,23 @@ class CustomEvalCreateOrUpdateSchema(TypedDict, total=False): """Whether the evaluation fails when score is above or below the threshold""" +class CustomEvalCreateOrUpdateSchemaGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + class DefaultEvalUpdateSchema(TypedDict, total=False): project_id: Required[str] @@ -91,11 +111,8 @@ class DefaultEvalUpdateSchema(TypedDict, total=False): enabled: bool """Allows the evaluation to be disabled without removing it""" - guardrailed_fallback_message: Optional[str] - """ - Fallback message to use if this eval fails and causes the response to be - guardrailed - """ + guardrailed_fallback: Optional[DefaultEvalUpdateSchemaGuardrailedFallback] + """message, priority, type""" priority: Optional[int] """ @@ -119,4 +136,21 @@ class DefaultEvalUpdateSchema(TypedDict, total=False): """Whether the evaluation fails when score is above or below the threshold""" +class DefaultEvalUpdateSchemaGuardrailedFallback(TypedDict, total=False): + message: Required[str] + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: Required[int] + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Required[Literal["ai_guidance", "expert_answer"]] + """Type of fallback to use if response is guardrailed""" + + EvalUpdateParams: TypeAlias = Union[CustomEvalCreateOrUpdateSchema, DefaultEvalUpdateSchema] diff --git a/src/codex/types/projects/query_log_list_by_group_response.py b/src/codex/types/projects/query_log_list_by_group_response.py index 5df928b6..0aa74bf4 100644 --- a/src/codex/types/projects/query_log_list_by_group_response.py +++ b/src/codex/types/projects/query_log_list_by_group_response.py @@ -16,8 +16,10 @@ "QueryLogsByGroupQueryLogFormattedNonGuardrailEvalScores", "QueryLogsByGroupQueryLogContext", "QueryLogsByGroupQueryLogDeterministicGuardrailsResults", + "QueryLogsByGroupQueryLogDeterministicGuardrailsResultsGuardrailedFallback", "QueryLogsByGroupQueryLogEvaluatedResponseToolCall", "QueryLogsByGroupQueryLogEvaluatedResponseToolCallFunction", + "QueryLogsByGroupQueryLogGuardrailedFallback", 
"QueryLogsByGroupQueryLogMessage", "QueryLogsByGroupQueryLogMessageChatCompletionAssistantMessageParamOutput", "QueryLogsByGroupQueryLogMessageChatCompletionAssistantMessageParamOutputAudio", @@ -98,12 +100,29 @@ class QueryLogsByGroupQueryLogContext(BaseModel): """Title or heading of the document. Useful for display and context.""" +class QueryLogsByGroupQueryLogDeterministicGuardrailsResultsGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + class QueryLogsByGroupQueryLogDeterministicGuardrailsResults(BaseModel): guardrail_name: str should_guardrail: bool - fallback_message: Optional[str] = None + guardrailed_fallback: Optional[QueryLogsByGroupQueryLogDeterministicGuardrailsResultsGuardrailedFallback] = None matches: Optional[List[str]] = None @@ -122,6 +141,26 @@ class QueryLogsByGroupQueryLogEvaluatedResponseToolCall(BaseModel): type: Literal["function"] +class QueryLogsByGroupQueryLogGuardrailedFallback(BaseModel): + message: str + """ + Fallback message to use if this eval fails and causes the response to be + guardrailed + """ + + priority: int + """ + Priority order for guardrails (lower number = higher priority) to determine + which fallback to use if multiple guardrails are triggered + """ + + type: Literal["ai_guidance", "expert_answer"] + """Type of fallback to use if response is guardrailed""" + + guardrail_name: Optional[str] = None + """Name of the guardrail that triggered the fallback""" + + class QueryLogsByGroupQueryLogMessageChatCompletionAssistantMessageParamOutputAudio(BaseModel): id: str @@ -446,6 +485,12 @@ class QueryLogsByGroupQueryLog(BaseModel): guardrailed: Optional[bool] = None """If true, the response was guardrailed""" + guardrailed_fallback: Optional[QueryLogsByGroupQueryLogGuardrailedFallback] = None + """ + Name, fallback message, priority, and type for for the triggered guardrail with + the highest priority + """ + manual_review_status_override: Optional[Literal["addressed", "unaddressed"]] = None """Manual review status override for remediations.""" diff --git a/src/codex/types/projects/query_log_list_groups_response.py b/src/codex/types/projects/query_log_list_groups_response.py index 727cac2a..e829f736 100644 --- a/src/codex/types/projects/query_log_list_groups_response.py +++ b/src/codex/types/projects/query_log_list_groups_response.py @@ -14,8 +14,10 @@ "FormattedNonGuardrailEvalScores", "Context", "DeterministicGuardrailsResults", + "DeterministicGuardrailsResultsGuardrailedFallback", "EvaluatedResponseToolCall", "EvaluatedResponseToolCallFunction", + "GuardrailedFallback", "Message", "MessageChatCompletionAssistantMessageParamOutput", "MessageChatCompletionAssistantMessageParamOutputAudio", @@ -95,12 +97,29 @@ class Context(BaseModel): """Title or heading of the document. 
Useful for display and context."""
 
 
+class DeterministicGuardrailsResultsGuardrailedFallback(BaseModel):
+    message: str
+    """
+    Fallback message to use if this eval fails and causes the response to be
+    guardrailed
+    """
+
+    priority: int
+    """
+    Priority order for guardrails (lower number = higher priority) to determine
+    which fallback to use if multiple guardrails are triggered
+    """
+
+    type: Literal["ai_guidance", "expert_answer"]
+    """Type of fallback to use if response is guardrailed"""
+
+
 class DeterministicGuardrailsResults(BaseModel):
     guardrail_name: str
 
     should_guardrail: bool
 
-    fallback_message: Optional[str] = None
+    guardrailed_fallback: Optional[DeterministicGuardrailsResultsGuardrailedFallback] = None
 
     matches: Optional[List[str]] = None
 
@@ -119,6 +138,26 @@ class EvaluatedResponseToolCall(BaseModel):
     type: Literal["function"]
 
 
+class GuardrailedFallback(BaseModel):
+    message: str
+    """
+    Fallback message to use if this eval fails and causes the response to be
+    guardrailed
+    """
+
+    priority: int
+    """
+    Priority order for guardrails (lower number = higher priority) to determine
+    which fallback to use if multiple guardrails are triggered
+    """
+
+    type: Literal["ai_guidance", "expert_answer"]
+    """Type of fallback to use if response is guardrailed"""
+
+    guardrail_name: Optional[str] = None
+    """Name of the guardrail that triggered the fallback"""
+
+
 class MessageChatCompletionAssistantMessageParamOutputAudio(BaseModel):
     id: str
 
@@ -441,6 +480,12 @@ class QueryLogListGroupsResponse(BaseModel):
     guardrailed: Optional[bool] = None
     """If true, the response was guardrailed"""
 
+    guardrailed_fallback: Optional[GuardrailedFallback] = None
+    """
+    Name, fallback message, priority, and type for the triggered guardrail with
+    the highest priority
+    """
+
     manual_review_status_override: Optional[Literal["addressed", "unaddressed"]] = None
     """Manual review status override for remediations."""
 
diff --git a/src/codex/types/projects/query_log_list_response.py b/src/codex/types/projects/query_log_list_response.py
index 0a8b4275..b26272eb 100644
--- a/src/codex/types/projects/query_log_list_response.py
+++ b/src/codex/types/projects/query_log_list_response.py
@@ -14,8 +14,10 @@
     "FormattedNonGuardrailEvalScores",
     "Context",
     "DeterministicGuardrailsResults",
+    "DeterministicGuardrailsResultsGuardrailedFallback",
     "EvaluatedResponseToolCall",
     "EvaluatedResponseToolCallFunction",
+    "GuardrailedFallback",
     "Message",
     "MessageChatCompletionAssistantMessageParamOutput",
     "MessageChatCompletionAssistantMessageParamOutputAudio",
@@ -95,12 +97,29 @@ class Context(BaseModel):
     """Title or heading of the document. 
Useful for display and context."""
 
 
+class DeterministicGuardrailsResultsGuardrailedFallback(BaseModel):
+    message: str
+    """
+    Fallback message to use if this eval fails and causes the response to be
+    guardrailed
+    """
+
+    priority: int
+    """
+    Priority order for guardrails (lower number = higher priority) to determine
+    which fallback to use if multiple guardrails are triggered
+    """
+
+    type: Literal["ai_guidance", "expert_answer"]
+    """Type of fallback to use if response is guardrailed"""
+
+
 class DeterministicGuardrailsResults(BaseModel):
     guardrail_name: str
 
     should_guardrail: bool
 
-    fallback_message: Optional[str] = None
+    guardrailed_fallback: Optional[DeterministicGuardrailsResultsGuardrailedFallback] = None
 
     matches: Optional[List[str]] = None
 
@@ -119,6 +138,26 @@ class EvaluatedResponseToolCall(BaseModel):
     type: Literal["function"]
 
 
+class GuardrailedFallback(BaseModel):
+    message: str
+    """
+    Fallback message to use if this eval fails and causes the response to be
+    guardrailed
+    """
+
+    priority: int
+    """
+    Priority order for guardrails (lower number = higher priority) to determine
+    which fallback to use if multiple guardrails are triggered
+    """
+
+    type: Literal["ai_guidance", "expert_answer"]
+    """Type of fallback to use if response is guardrailed"""
+
+    guardrail_name: Optional[str] = None
+    """Name of the guardrail that triggered the fallback"""
+
+
 class MessageChatCompletionAssistantMessageParamOutputAudio(BaseModel):
     id: str
 
@@ -429,6 +468,12 @@ class QueryLogListResponse(BaseModel):
     guardrailed: Optional[bool] = None
     """If true, the response was guardrailed"""
 
+    guardrailed_fallback: Optional[GuardrailedFallback] = None
+    """
+    Name, fallback message, priority, and type for the triggered guardrail with
+    the highest priority
+    """
+
     messages: Optional[List[Message]] = None
     """Message history to provide conversation context for the query.
 
diff --git a/src/codex/types/projects/query_log_retrieve_response.py b/src/codex/types/projects/query_log_retrieve_response.py
index 13510f87..0ef986f2 100644
--- a/src/codex/types/projects/query_log_retrieve_response.py
+++ b/src/codex/types/projects/query_log_retrieve_response.py
@@ -14,8 +14,10 @@
     "FormattedNonGuardrailEvalScores",
     "Context",
     "DeterministicGuardrailsResults",
+    "DeterministicGuardrailsResultsGuardrailedFallback",
     "EvaluatedResponseToolCall",
     "EvaluatedResponseToolCallFunction",
+    "GuardrailedFallback",
     "Message",
     "MessageChatCompletionAssistantMessageParamOutput",
     "MessageChatCompletionAssistantMessageParamOutputAudio",
@@ -95,12 +97,29 @@ class Context(BaseModel):
     """Title or heading of the document. 
Useful for display and context."""
 
 
+class DeterministicGuardrailsResultsGuardrailedFallback(BaseModel):
+    message: str
+    """
+    Fallback message to use if this eval fails and causes the response to be
+    guardrailed
+    """
+
+    priority: int
+    """
+    Priority order for guardrails (lower number = higher priority) to determine
+    which fallback to use if multiple guardrails are triggered
+    """
+
+    type: Literal["ai_guidance", "expert_answer"]
+    """Type of fallback to use if response is guardrailed"""
+
+
 class DeterministicGuardrailsResults(BaseModel):
     guardrail_name: str
 
     should_guardrail: bool
 
-    fallback_message: Optional[str] = None
+    guardrailed_fallback: Optional[DeterministicGuardrailsResultsGuardrailedFallback] = None
 
     matches: Optional[List[str]] = None
 
@@ -119,6 +138,26 @@ class EvaluatedResponseToolCall(BaseModel):
     type: Literal["function"]
 
 
+class GuardrailedFallback(BaseModel):
+    message: str
+    """
+    Fallback message to use if this eval fails and causes the response to be
+    guardrailed
+    """
+
+    priority: int
+    """
+    Priority order for guardrails (lower number = higher priority) to determine
+    which fallback to use if multiple guardrails are triggered
+    """
+
+    type: Literal["ai_guidance", "expert_answer"]
+    """Type of fallback to use if response is guardrailed"""
+
+    guardrail_name: Optional[str] = None
+    """Name of the guardrail that triggered the fallback"""
+
+
 class MessageChatCompletionAssistantMessageParamOutputAudio(BaseModel):
     id: str
 
@@ -433,6 +472,12 @@ class QueryLogRetrieveResponse(BaseModel):
     guardrailed: Optional[bool] = None
     """If true, the response was guardrailed"""
 
+    guardrailed_fallback: Optional[GuardrailedFallback] = None
+    """
+    Name, fallback message, priority, and type for the triggered guardrail with
+    the highest priority
+    """
+
     manual_review_status_override: Optional[Literal["addressed", "unaddressed"]] = None
     """Manual review status override for remediations."""
 
diff --git a/src/codex/types/projects/remediation_list_resolved_logs_response.py b/src/codex/types/projects/remediation_list_resolved_logs_response.py
index 16017b45..e04e4130 100644
--- a/src/codex/types/projects/remediation_list_resolved_logs_response.py
+++ b/src/codex/types/projects/remediation_list_resolved_logs_response.py
@@ -15,8 +15,10 @@
     "QueryLogFormattedNonGuardrailEvalScores",
     "QueryLogContext",
     "QueryLogDeterministicGuardrailsResults",
+    "QueryLogDeterministicGuardrailsResultsGuardrailedFallback",
     "QueryLogEvaluatedResponseToolCall",
     "QueryLogEvaluatedResponseToolCallFunction",
+    "QueryLogGuardrailedFallback",
     "QueryLogMessage",
     "QueryLogMessageChatCompletionAssistantMessageParamOutput",
     "QueryLogMessageChatCompletionAssistantMessageParamOutputAudio",
@@ -96,12 +98,29 @@ class QueryLogContext(BaseModel):
     """Title or heading of the document. 
Useful for display and context."""
 
 
+class QueryLogDeterministicGuardrailsResultsGuardrailedFallback(BaseModel):
+    message: str
+    """
+    Fallback message to use if this eval fails and causes the response to be
+    guardrailed
+    """
+
+    priority: int
+    """
+    Priority order for guardrails (lower number = higher priority) to determine
+    which fallback to use if multiple guardrails are triggered
+    """
+
+    type: Literal["ai_guidance", "expert_answer"]
+    """Type of fallback to use if response is guardrailed"""
+
+
 class QueryLogDeterministicGuardrailsResults(BaseModel):
     guardrail_name: str
 
     should_guardrail: bool
 
-    fallback_message: Optional[str] = None
+    guardrailed_fallback: Optional[QueryLogDeterministicGuardrailsResultsGuardrailedFallback] = None
 
     matches: Optional[List[str]] = None
 
@@ -120,6 +139,26 @@ class QueryLogEvaluatedResponseToolCall(BaseModel):
     type: Literal["function"]
 
 
+class QueryLogGuardrailedFallback(BaseModel):
+    message: str
+    """
+    Fallback message to use if this eval fails and causes the response to be
+    guardrailed
+    """
+
+    priority: int
+    """
+    Priority order for guardrails (lower number = higher priority) to determine
+    which fallback to use if multiple guardrails are triggered
+    """
+
+    type: Literal["ai_guidance", "expert_answer"]
+    """Type of fallback to use if response is guardrailed"""
+
+    guardrail_name: Optional[str] = None
+    """Name of the guardrail that triggered the fallback"""
+
+
 class QueryLogMessageChatCompletionAssistantMessageParamOutputAudio(BaseModel):
     id: str
 
@@ -436,6 +475,12 @@ class QueryLog(BaseModel):
     guardrailed: Optional[bool] = None
     """If true, the response was guardrailed"""
 
+    guardrailed_fallback: Optional[QueryLogGuardrailedFallback] = None
+    """
+    Name, fallback message, priority, and type for the triggered guardrail with
+    the highest priority
+    """
+
     messages: Optional[List[QueryLogMessage]] = None
     """Message history to provide conversation context for the query.
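
For orientation before the test updates that follow: taken together, the hunks above replace the flat `guardrailed_fallback_message: str` field with a structured `guardrailed_fallback` object that carries the fallback message together with an explicit priority and fallback type. Below is a minimal sketch of the new call shape; it assumes the eval resource is exposed as `client.projects.evals` (as the test paths below suggest), and the token, UUID, and field values are illustrative placeholders rather than a working example.

    from codex import Codex

    client = Codex(auth_token="my-auth-token")  # placeholder token

    # Old shape: guardrailed_fallback_message="Sorry, I can't help with that."
    # New shape: the message travels with an explicit priority and fallback type.
    client.projects.evals.create(
        project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",  # placeholder UUID
        eval_key="my_eval",  # remaining fields are illustrative placeholders
        name="My eval",
        criteria="Check whether the response answers the question.",
        guardrailed_fallback={
            "message": "Sorry, I can't help with that.",
            "priority": 0,  # lower number = higher priority among triggered guardrails
            "type": "ai_guidance",  # or "expert_answer"
        },
    )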
diff --git a/tests/api_resources/projects/test_evals.py b/tests/api_resources/projects/test_evals.py index 1ccde4f3..4fb6a6ba 100644 --- a/tests/api_resources/projects/test_evals.py +++ b/tests/api_resources/projects/test_evals.py @@ -39,7 +39,11 @@ def test_method_create_with_all_params(self, client: Codex) -> None: name="name", context_identifier="context_identifier", enabled=True, - guardrailed_fallback_message="guardrailed_fallback_message", + guardrailed_fallback={ + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, is_default=True, priority=0, query_identifier="query_identifier", @@ -117,7 +121,11 @@ def test_method_update_with_all_params_overload_1(self, client: Codex) -> None: name="name", context_identifier="context_identifier", enabled=True, - guardrailed_fallback_message="guardrailed_fallback_message", + guardrailed_fallback={ + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, is_default=True, priority=0, query_identifier="query_identifier", @@ -202,7 +210,11 @@ def test_method_update_with_all_params_overload_2(self, client: Codex) -> None: project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", body_eval_key="eval_key", enabled=True, - guardrailed_fallback_message="guardrailed_fallback_message", + guardrailed_fallback={ + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, priority=0, should_escalate=True, should_guardrail=True, @@ -390,7 +402,11 @@ async def test_method_create_with_all_params(self, async_client: AsyncCodex) -> name="name", context_identifier="context_identifier", enabled=True, - guardrailed_fallback_message="guardrailed_fallback_message", + guardrailed_fallback={ + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, is_default=True, priority=0, query_identifier="query_identifier", @@ -468,7 +484,11 @@ async def test_method_update_with_all_params_overload_1(self, async_client: Asyn name="name", context_identifier="context_identifier", enabled=True, - guardrailed_fallback_message="guardrailed_fallback_message", + guardrailed_fallback={ + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, is_default=True, priority=0, query_identifier="query_identifier", @@ -553,7 +573,11 @@ async def test_method_update_with_all_params_overload_2(self, async_client: Asyn project_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e", body_eval_key="eval_key", enabled=True, - guardrailed_fallback_message="guardrailed_fallback_message", + guardrailed_fallback={ + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, priority=0, should_escalate=True, should_guardrail=True, diff --git a/tests/api_resources/test_projects.py b/tests/api_resources/test_projects.py index d83fdd1d..97f973c1 100644 --- a/tests/api_resources/test_projects.py +++ b/tests/api_resources/test_projects.py @@ -51,7 +51,11 @@ def test_method_create_with_all_params(self, client: Codex) -> None: "name": "name", "context_identifier": "context_identifier", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "is_default": True, "priority": 0, "query_identifier": "query_identifier", @@ -68,7 +72,11 @@ def test_method_create_with_all_params(self, client: Codex) -> None: "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, 
"should_escalate": True, "should_guardrail": True, @@ -79,7 +87,11 @@ def test_method_create_with_all_params(self, client: Codex) -> None: "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -90,7 +102,11 @@ def test_method_create_with_all_params(self, client: Codex) -> None: "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -101,7 +117,11 @@ def test_method_create_with_all_params(self, client: Codex) -> None: "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -112,7 +132,11 @@ def test_method_create_with_all_params(self, client: Codex) -> None: "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -236,7 +260,11 @@ def test_method_update_with_all_params(self, client: Codex) -> None: "name": "name", "context_identifier": "context_identifier", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "is_default": True, "priority": 0, "query_identifier": "query_identifier", @@ -253,7 +281,11 @@ def test_method_update_with_all_params(self, client: Codex) -> None: "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -264,7 +296,11 @@ def test_method_update_with_all_params(self, client: Codex) -> None: "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -275,7 +311,11 @@ def test_method_update_with_all_params(self, client: Codex) -> None: "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -286,7 +326,11 @@ def test_method_update_with_all_params(self, client: Codex) -> None: "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -297,7 +341,11 @@ def test_method_update_with_all_params(self, client: 
Codex) -> None: "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -513,7 +561,11 @@ def test_method_detect_with_all_params(self, client: Codex) -> None: "name": "name", "context_identifier": "context_identifier", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "is_default": True, "priority": 0, "query_identifier": "query_identifier", @@ -530,7 +582,11 @@ def test_method_detect_with_all_params(self, client: Codex) -> None: "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -541,7 +597,11 @@ def test_method_detect_with_all_params(self, client: Codex) -> None: "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -552,7 +612,11 @@ def test_method_detect_with_all_params(self, client: Codex) -> None: "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -563,7 +627,11 @@ def test_method_detect_with_all_params(self, client: Codex) -> None: "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -574,7 +642,11 @@ def test_method_detect_with_all_params(self, client: Codex) -> None: "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -986,7 +1058,11 @@ async def test_method_create_with_all_params(self, async_client: AsyncCodex) -> "name": "name", "context_identifier": "context_identifier", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "is_default": True, "priority": 0, "query_identifier": "query_identifier", @@ -1003,7 +1079,11 @@ async def test_method_create_with_all_params(self, async_client: AsyncCodex) -> "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -1014,7 +1094,11 @@ async def test_method_create_with_all_params(self, async_client: AsyncCodex) -> "eval_key": "eval_key", "name": "name", 
"enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -1025,7 +1109,11 @@ async def test_method_create_with_all_params(self, async_client: AsyncCodex) -> "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -1036,7 +1124,11 @@ async def test_method_create_with_all_params(self, async_client: AsyncCodex) -> "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -1047,7 +1139,11 @@ async def test_method_create_with_all_params(self, async_client: AsyncCodex) -> "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -1171,7 +1267,11 @@ async def test_method_update_with_all_params(self, async_client: AsyncCodex) -> "name": "name", "context_identifier": "context_identifier", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "is_default": True, "priority": 0, "query_identifier": "query_identifier", @@ -1188,7 +1288,11 @@ async def test_method_update_with_all_params(self, async_client: AsyncCodex) -> "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -1199,7 +1303,11 @@ async def test_method_update_with_all_params(self, async_client: AsyncCodex) -> "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -1210,7 +1318,11 @@ async def test_method_update_with_all_params(self, async_client: AsyncCodex) -> "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -1221,7 +1333,11 @@ async def test_method_update_with_all_params(self, async_client: AsyncCodex) -> "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -1232,7 +1348,11 @@ async def test_method_update_with_all_params(self, async_client: AsyncCodex) -> "eval_key": "eval_key", "name": "name", 
"enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -1448,7 +1568,11 @@ async def test_method_detect_with_all_params(self, async_client: AsyncCodex) -> "name": "name", "context_identifier": "context_identifier", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "is_default": True, "priority": 0, "query_identifier": "query_identifier", @@ -1465,7 +1589,11 @@ async def test_method_detect_with_all_params(self, async_client: AsyncCodex) -> "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -1476,7 +1604,11 @@ async def test_method_detect_with_all_params(self, async_client: AsyncCodex) -> "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -1487,7 +1619,11 @@ async def test_method_detect_with_all_params(self, async_client: AsyncCodex) -> "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -1498,7 +1634,11 @@ async def test_method_detect_with_all_params(self, async_client: AsyncCodex) -> "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, @@ -1509,7 +1649,11 @@ async def test_method_detect_with_all_params(self, async_client: AsyncCodex) -> "eval_key": "eval_key", "name": "name", "enabled": True, - "guardrailed_fallback_message": "guardrailed_fallback_message", + "guardrailed_fallback": { + "message": "message", + "priority": 0, + "type": "ai_guidance", + }, "priority": 0, "should_escalate": True, "should_guardrail": True, From 10fff82bf6da70c6bab58ec113e64b50d21e182f Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Wed, 5 Nov 2025 19:18:15 +0000 Subject: [PATCH 7/7] release: 0.1.0-alpha.32 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ pyproject.toml | 2 +- src/codex/_version.py | 2 +- 4 files changed, 22 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a899ac74..2ce25fec 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "0.1.0-alpha.31" + ".": "0.1.0-alpha.32" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 35619b0d..be7448dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## 0.1.0-alpha.32 (2025-11-05) + +Full Changelog: 
[v0.1.0-alpha.31...v0.1.0-alpha.32](https://github.com/cleanlab/codex-python/compare/v0.1.0-alpha.31...v0.1.0-alpha.32) + +### Features + +* **api:** api update ([e8c8537](https://github.com/cleanlab/codex-python/commit/e8c853706394437e0738d53594a7d635df224117)) + + +### Bug Fixes + +* **client:** close streams without requiring full consumption ([b5a889c](https://github.com/cleanlab/codex-python/commit/b5a889c4bdd788df03f310e30d6a3ee304193f53)) + + +### Chores + +* **internal/tests:** avoid race condition with implicit client cleanup ([8f06048](https://github.com/cleanlab/codex-python/commit/8f06048416bf39cdad0290b23fd8d930be869223)) +* **internal:** grammar fix (it's -> its) ([9d4a431](https://github.com/cleanlab/codex-python/commit/9d4a4318fac51055f63af10ba451b26bc6c1f179)) + ## 0.1.0-alpha.31 (2025-10-28) Full Changelog: [v0.1.0-alpha.30...v0.1.0-alpha.31](https://github.com/cleanlab/codex-python/compare/v0.1.0-alpha.30...v0.1.0-alpha.31) diff --git a/pyproject.toml b/pyproject.toml index 83b0c508..060cc09f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "codex-sdk" -version = "0.1.0-alpha.31" +version = "0.1.0-alpha.32" description = "Internal SDK used within cleanlab-codex package. Refer to https://pypi.org/project/cleanlab-codex/ instead." dynamic = ["readme"] license = "MIT" diff --git a/src/codex/_version.py b/src/codex/_version.py index f48cd030..57e8ea7c 100644 --- a/src/codex/_version.py +++ b/src/codex/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "codex" -__version__ = "0.1.0-alpha.31" # x-release-please-version +__version__ = "0.1.0-alpha.32" # x-release-please-version