diff --git a/.stats.yml b/.stats.yml
index 21e3388..87850ac 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,2 +1,2 @@
 configured_endpoints: 21
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/prompt-foundry%2Fprompt-foundry-sdk-ec010a81f2e8cfe74de9842b6fcd84599fd1d3f439ba3eb868fb5bdbfa2fa260.yml
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/prompt-foundry%2Fprompt-foundry-sdk-101dab0674c0098868baea52c37050f14ce1dffeadecc3978c77e7b8a47c608c.yml
diff --git a/src/prompt_foundry_python_sdk/resources/evaluation_assertions.py b/src/prompt_foundry_python_sdk/resources/evaluation_assertions.py
index c52d832..bbeca3f 100644
--- a/src/prompt_foundry_python_sdk/resources/evaluation_assertions.py
+++ b/src/prompt_foundry_python_sdk/resources/evaluation_assertions.py
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import Optional
+from typing import List, Optional
 from typing_extensions import Literal
 
 import httpx
@@ -47,11 +47,21 @@ def create(
         *,
         evaluation_id: str,
         json_path: Optional[str],
-        target_value: Optional[str],
+        target_threshold: Optional[float],
+        target_values: Optional[List[str]],
         tool_name: Optional[str],
         type: Literal[
-            "CONTAINS", "EXACT_MATCH", "JSON_CONTAINS", "JSON_EXACT_MATCH", "TOOL_CALLED", "TOOL_CALLED_WITH"
+            "CONTAINS_ALL",
+            "CONTAINS_ANY",
+            "COST",
+            "EXACT_MATCH",
+            "LATENCY",
+            "STARTS_WITH",
+            "TOOL_CALLED",
+            "TOOL_CALLED_WITH",
         ],
+        ignore_case: bool | NotGiven = NOT_GIVEN,
+        negate: bool | NotGiven = NOT_GIVEN,
         weight: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -88,9 +98,12 @@ def create(
                 {
                     "evaluation_id": evaluation_id,
                     "json_path": json_path,
-                    "target_value": target_value,
+                    "target_threshold": target_threshold,
+                    "target_values": target_values,
                     "tool_name": tool_name,
                     "type": type,
+                    "ignore_case": ignore_case,
+                    "negate": negate,
                     "weight": weight,
                 },
                 evaluation_assertion_create_params.EvaluationAssertionCreateParams,
@@ -107,11 +120,21 @@ def update(
         *,
         evaluation_id: str,
         json_path: Optional[str],
-        target_value: Optional[str],
+        target_threshold: Optional[float],
+        target_values: Optional[List[str]],
         tool_name: Optional[str],
         type: Literal[
-            "CONTAINS", "EXACT_MATCH", "JSON_CONTAINS", "JSON_EXACT_MATCH", "TOOL_CALLED", "TOOL_CALLED_WITH"
+            "CONTAINS_ALL",
+            "CONTAINS_ANY",
+            "COST",
+            "EXACT_MATCH",
+            "LATENCY",
+            "STARTS_WITH",
+            "TOOL_CALLED",
+            "TOOL_CALLED_WITH",
         ],
+        ignore_case: bool | NotGiven = NOT_GIVEN,
+        negate: bool | NotGiven = NOT_GIVEN,
         weight: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -150,9 +173,12 @@ def update(
                 {
                     "evaluation_id": evaluation_id,
                     "json_path": json_path,
-                    "target_value": target_value,
+                    "target_threshold": target_threshold,
+                    "target_values": target_values,
                     "tool_name": tool_name,
                     "type": type,
+                    "ignore_case": ignore_case,
+                    "negate": negate,
                     "weight": weight,
                 },
                 evaluation_assertion_update_params.EvaluationAssertionUpdateParams,
@@ -283,11 +309,21 @@ async def create(
         *,
         evaluation_id: str,
         json_path: Optional[str],
-        target_value: Optional[str],
+        target_threshold: Optional[float],
+        target_values: Optional[List[str]],
         tool_name: Optional[str],
         type: Literal[
-            "CONTAINS", "EXACT_MATCH", "JSON_CONTAINS", "JSON_EXACT_MATCH", "TOOL_CALLED", "TOOL_CALLED_WITH"
+            "CONTAINS_ALL",
+            "CONTAINS_ANY",
+            "COST",
+            "EXACT_MATCH",
+            "LATENCY",
+            "STARTS_WITH",
+            "TOOL_CALLED",
+            "TOOL_CALLED_WITH",
         ],
+        ignore_case: bool | NotGiven = NOT_GIVEN,
+        negate: bool | NotGiven = NOT_GIVEN,
         weight: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -324,9 +360,12 @@ async def create(
                 {
                     "evaluation_id": evaluation_id,
                     "json_path": json_path,
-                    "target_value": target_value,
+                    "target_threshold": target_threshold,
+                    "target_values": target_values,
                     "tool_name": tool_name,
                     "type": type,
+                    "ignore_case": ignore_case,
+                    "negate": negate,
                     "weight": weight,
                 },
                 evaluation_assertion_create_params.EvaluationAssertionCreateParams,
@@ -343,11 +382,21 @@ async def update(
         *,
         evaluation_id: str,
         json_path: Optional[str],
-        target_value: Optional[str],
+        target_threshold: Optional[float],
+        target_values: Optional[List[str]],
         tool_name: Optional[str],
         type: Literal[
-            "CONTAINS", "EXACT_MATCH", "JSON_CONTAINS", "JSON_EXACT_MATCH", "TOOL_CALLED", "TOOL_CALLED_WITH"
+            "CONTAINS_ALL",
+            "CONTAINS_ANY",
+            "COST",
+            "EXACT_MATCH",
+            "LATENCY",
+            "STARTS_WITH",
+            "TOOL_CALLED",
+            "TOOL_CALLED_WITH",
         ],
+        ignore_case: bool | NotGiven = NOT_GIVEN,
+        negate: bool | NotGiven = NOT_GIVEN,
         weight: float | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
@@ -386,9 +435,12 @@ async def update(
                 {
                     "evaluation_id": evaluation_id,
                     "json_path": json_path,
-                    "target_value": target_value,
+                    "target_threshold": target_threshold,
+                    "target_values": target_values,
                     "tool_name": tool_name,
                     "type": type,
+                    "ignore_case": ignore_case,
+                    "negate": negate,
                     "weight": weight,
                 },
                 evaluation_assertion_update_params.EvaluationAssertionUpdateParams,
diff --git a/src/prompt_foundry_python_sdk/types/evaluation_assertion.py b/src/prompt_foundry_python_sdk/types/evaluation_assertion.py
index 5eeaea0..1cae096 100644
--- a/src/prompt_foundry_python_sdk/types/evaluation_assertion.py
+++ b/src/prompt_foundry_python_sdk/types/evaluation_assertion.py
@@ -1,6 +1,6 @@
 # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
 
-from typing import Optional
+from typing import List, Optional
 from typing_extensions import Literal
 
 from pydantic import Field as FieldInfo
@@ -21,7 +21,9 @@ class EvaluationAssertion(BaseModel):
     Only required when type is `JSON_EXACT_MATCH` or `JSON_CONTAINS`.
""" - target_value: Optional[str] = FieldInfo(alias="targetValue", default=None) + target_threshold: Optional[float] = FieldInfo(alias="targetThreshold", default=None) + + target_values: Optional[List[str]] = FieldInfo(alias="targetValues", default=None) tool_name: Optional[str] = FieldInfo(alias="toolName", default=None) """The name of the tool to match. @@ -29,8 +31,21 @@ class EvaluationAssertion(BaseModel): Only required when type is `TOOL_CALLED` or `TOOL_CALLED_WITH`. """ - type: Literal["CONTAINS", "EXACT_MATCH", "JSON_CONTAINS", "JSON_EXACT_MATCH", "TOOL_CALLED", "TOOL_CALLED_WITH"] + type: Literal[ + "CONTAINS_ALL", + "CONTAINS_ANY", + "COST", + "EXACT_MATCH", + "LATENCY", + "STARTS_WITH", + "TOOL_CALLED", + "TOOL_CALLED_WITH", + ] """The type of evaluation matcher to use.""" + ignore_case: Optional[bool] = FieldInfo(alias="ignoreCase", default=None) + + negate: Optional[bool] = None + weight: Optional[float] = None """How heavily to weigh the assertion within the evaluation.""" diff --git a/src/prompt_foundry_python_sdk/types/evaluation_assertion_create_params.py b/src/prompt_foundry_python_sdk/types/evaluation_assertion_create_params.py index 3adaf1e..3d9dad1 100644 --- a/src/prompt_foundry_python_sdk/types/evaluation_assertion_create_params.py +++ b/src/prompt_foundry_python_sdk/types/evaluation_assertion_create_params.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Optional +from typing import List, Optional from typing_extensions import Literal, Required, Annotated, TypedDict from .._utils import PropertyInfo @@ -19,7 +19,9 @@ class EvaluationAssertionCreateParams(TypedDict, total=False): Only required when type is `JSON_EXACT_MATCH` or `JSON_CONTAINS`. """ - target_value: Required[Annotated[Optional[str], PropertyInfo(alias="targetValue")]] + target_threshold: Required[Annotated[Optional[float], PropertyInfo(alias="targetThreshold")]] + + target_values: Required[Annotated[Optional[List[str]], PropertyInfo(alias="targetValues")]] tool_name: Required[Annotated[Optional[str], PropertyInfo(alias="toolName")]] """The name of the tool to match. @@ -28,9 +30,22 @@ class EvaluationAssertionCreateParams(TypedDict, total=False): """ type: Required[ - Literal["CONTAINS", "EXACT_MATCH", "JSON_CONTAINS", "JSON_EXACT_MATCH", "TOOL_CALLED", "TOOL_CALLED_WITH"] + Literal[ + "CONTAINS_ALL", + "CONTAINS_ANY", + "COST", + "EXACT_MATCH", + "LATENCY", + "STARTS_WITH", + "TOOL_CALLED", + "TOOL_CALLED_WITH", + ] ] """The type of evaluation matcher to use.""" + ignore_case: Annotated[bool, PropertyInfo(alias="ignoreCase")] + + negate: bool + weight: float """How heavily to weigh the assertion within the evaluation.""" diff --git a/src/prompt_foundry_python_sdk/types/evaluation_assertion_update_params.py b/src/prompt_foundry_python_sdk/types/evaluation_assertion_update_params.py index 8c9813c..81a2ae6 100644 --- a/src/prompt_foundry_python_sdk/types/evaluation_assertion_update_params.py +++ b/src/prompt_foundry_python_sdk/types/evaluation_assertion_update_params.py @@ -2,7 +2,7 @@ from __future__ import annotations -from typing import Optional +from typing import List, Optional from typing_extensions import Literal, Required, Annotated, TypedDict from .._utils import PropertyInfo @@ -19,7 +19,9 @@ class EvaluationAssertionUpdateParams(TypedDict, total=False): Only required when type is `JSON_EXACT_MATCH` or `JSON_CONTAINS`. 
""" - target_value: Required[Annotated[Optional[str], PropertyInfo(alias="targetValue")]] + target_threshold: Required[Annotated[Optional[float], PropertyInfo(alias="targetThreshold")]] + + target_values: Required[Annotated[Optional[List[str]], PropertyInfo(alias="targetValues")]] tool_name: Required[Annotated[Optional[str], PropertyInfo(alias="toolName")]] """The name of the tool to match. @@ -28,9 +30,22 @@ class EvaluationAssertionUpdateParams(TypedDict, total=False): """ type: Required[ - Literal["CONTAINS", "EXACT_MATCH", "JSON_CONTAINS", "JSON_EXACT_MATCH", "TOOL_CALLED", "TOOL_CALLED_WITH"] + Literal[ + "CONTAINS_ALL", + "CONTAINS_ANY", + "COST", + "EXACT_MATCH", + "LATENCY", + "STARTS_WITH", + "TOOL_CALLED", + "TOOL_CALLED_WITH", + ] ] """The type of evaluation matcher to use.""" + ignore_case: Annotated[bool, PropertyInfo(alias="ignoreCase")] + + negate: bool + weight: float """How heavily to weigh the assertion within the evaluation.""" diff --git a/tests/api_resources/test_evaluation_assertions.py b/tests/api_resources/test_evaluation_assertions.py index 302c562..a8904b6 100644 --- a/tests/api_resources/test_evaluation_assertions.py +++ b/tests/api_resources/test_evaluation_assertions.py @@ -26,9 +26,10 @@ def test_method_create(self, client: PromptFoundry) -> None: evaluation_assertion = client.evaluation_assertions.create( evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", ) assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"]) @@ -37,9 +38,12 @@ def test_method_create_with_all_params(self, client: PromptFoundry) -> None: evaluation_assertion = client.evaluation_assertions.create( evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", + ignore_case=True, + negate=True, weight=0, ) assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"]) @@ -49,9 +53,10 @@ def test_raw_response_create(self, client: PromptFoundry) -> None: response = client.evaluation_assertions.with_raw_response.create( evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", ) assert response.is_closed is True @@ -64,9 +69,10 @@ def test_streaming_response_create(self, client: PromptFoundry) -> None: with client.evaluation_assertions.with_streaming_response.create( evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -82,9 +88,10 @@ def test_method_update(self, client: PromptFoundry) -> None: id="1212121", evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", ) assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"]) @@ -94,9 +101,12 @@ def test_method_update_with_all_params(self, client: PromptFoundry) -> None: 
id="1212121", evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", + ignore_case=True, + negate=True, weight=0, ) assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"]) @@ -107,9 +117,10 @@ def test_raw_response_update(self, client: PromptFoundry) -> None: id="1212121", evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", ) assert response.is_closed is True @@ -123,9 +134,10 @@ def test_streaming_response_update(self, client: PromptFoundry) -> None: id="1212121", evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -142,9 +154,10 @@ def test_path_params_update(self, client: PromptFoundry) -> None: id="", evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", ) @parametrize @@ -264,9 +277,10 @@ async def test_method_create(self, async_client: AsyncPromptFoundry) -> None: evaluation_assertion = await async_client.evaluation_assertions.create( evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", ) assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"]) @@ -275,9 +289,12 @@ async def test_method_create_with_all_params(self, async_client: AsyncPromptFoun evaluation_assertion = await async_client.evaluation_assertions.create( evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", + ignore_case=True, + negate=True, weight=0, ) assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"]) @@ -287,9 +304,10 @@ async def test_raw_response_create(self, async_client: AsyncPromptFoundry) -> No response = await async_client.evaluation_assertions.with_raw_response.create( evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", ) assert response.is_closed is True @@ -302,9 +320,10 @@ async def test_streaming_response_create(self, async_client: AsyncPromptFoundry) async with async_client.evaluation_assertions.with_streaming_response.create( evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -320,9 +339,10 @@ async def test_method_update(self, async_client: AsyncPromptFoundry) -> None: id="1212121", 
evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", ) assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"]) @@ -332,9 +352,12 @@ async def test_method_update_with_all_params(self, async_client: AsyncPromptFoun id="1212121", evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", + ignore_case=True, + negate=True, weight=0, ) assert_matches_type(EvaluationAssertion, evaluation_assertion, path=["response"]) @@ -345,9 +368,10 @@ async def test_raw_response_update(self, async_client: AsyncPromptFoundry) -> No id="1212121", evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", ) assert response.is_closed is True @@ -361,9 +385,10 @@ async def test_streaming_response_update(self, async_client: AsyncPromptFoundry) id="1212121", evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", ) as response: assert not response.is_closed assert response.http_request.headers.get("X-Stainless-Lang") == "python" @@ -380,9 +405,10 @@ async def test_path_params_update(self, async_client: AsyncPromptFoundry) -> Non id="", evaluation_id="evaluationId", json_path="jsonPath", - target_value="targetValue", + target_threshold=0, + target_values=["string", "string", "string"], tool_name="toolName", - type="CONTAINS", + type="CONTAINS_ALL", ) @parametrize