From 861f1055768168ab04987a42efcd32a07bc93542 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 14 May 2025 23:07:08 +0000
Subject: [PATCH 1/8] chore(ci): upload sdks to package manager

---
 .github/workflows/ci.yml         | 24 ++++++++++++++++++++++++
 scripts/utils/upload-artifact.sh | 25 +++++++++++++++++++++++++
 2 files changed, 49 insertions(+)
 create mode 100755 scripts/utils/upload-artifact.sh

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index e1e21f3fae..e853b86695 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -30,6 +30,30 @@ jobs:
       - name: Run lints
         run: ./scripts/lint
 
+  upload:
+    if: github.repository == 'stainless-sdks/openai-python'
+    timeout-minutes: 10
+    name: upload
+    permissions:
+      contents: read
+      id-token: write
+    runs-on: depot-ubuntu-24.04
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Get GitHub OIDC Token
+        id: github-oidc
+        uses: actions/github-script@v6
+        with:
+          script: core.setOutput('github_token', await core.getIDToken());
+
+      - name: Upload tarball
+        env:
+          URL: https://pkg.stainless.com/s
+          AUTH: ${{ steps.github-oidc.outputs.github_token }}
+          SHA: ${{ github.sha }}
+        run: ./scripts/utils/upload-artifact.sh
+
   test:
     timeout-minutes: 10
     name: test
diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh
new file mode 100755
index 0000000000..b9ab47d945
--- /dev/null
+++ b/scripts/utils/upload-artifact.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+set -exuo pipefail
+
+RESPONSE=$(curl -X POST "$URL" \
+  -H "Authorization: Bearer $AUTH" \
+  -H "Content-Type: application/json")
+
+SIGNED_URL=$(echo "$RESPONSE" | jq -r '.url')
+
+if [[ "$SIGNED_URL" == "null" ]]; then
+  echo -e "\033[31mFailed to get signed URL.\033[0m"
+  exit 1
+fi
+
+UPLOAD_RESPONSE=$(tar -cz . | curl -v -X PUT \
+  -H "Content-Type: application/gzip" \
+  --data-binary @- "$SIGNED_URL" 2>&1)
+
+if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then
+  echo -e "\033[32mUploaded build to Stainless storage.\033[0m"
+  echo -e "\033[32mInstallation: npm install 'https://pkg.stainless.com/s/openai-python/$SHA'\033[0m"
+else
+  echo -e "\033[31mFailed to upload artifact.\033[0m"
+  exit 1
+fi

From f26c5fc85d98d700b68cb55c8be5d15983a9aeaf Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 15 May 2025 12:25:19 +0000
Subject: [PATCH 2/8] chore(ci): fix installation instructions

---
 scripts/utils/upload-artifact.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/scripts/utils/upload-artifact.sh b/scripts/utils/upload-artifact.sh
index b9ab47d945..75198de98f 100755
--- a/scripts/utils/upload-artifact.sh
+++ b/scripts/utils/upload-artifact.sh
@@ -18,7 +18,7 @@ UPLOAD_RESPONSE=$(tar -cz . | curl -v -X PUT \
 
 if echo "$UPLOAD_RESPONSE" | grep -q "HTTP/[0-9.]* 200"; then
   echo -e "\033[32mUploaded build to Stainless storage.\033[0m"
-  echo -e "\033[32mInstallation: npm install 'https://pkg.stainless.com/s/openai-python/$SHA'\033[0m"
+  echo -e "\033[32mInstallation: pip install 'https://pkg.stainless.com/s/openai-python/$SHA'\033[0m"
 else
   echo -e "\033[31mFailed to upload artifact.\033[0m"
   exit 1

From fd586cbdf889c9a5c6b9be177ff02fbfffa3eba5 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 15 May 2025 21:34:07 +0000
Subject: [PATCH 3/8] feat(api): responses x eval api

---
 .stats.yml                                    |   6 +-
 api.md                                        |   2 +
 src/openai/resources/audio/transcriptions.py  |  94 +++++++-
 src/openai/resources/embeddings.py            |  14 +-
 src/openai/types/__init__.py                  |   1 +
 .../audio/transcription_create_params.py      |  38 ++-
 src/openai/types/embedding_create_params.py   |   9 +-
 src/openai/types/eval_create_params.py        |  15 +-
 src/openai/types/eval_create_response.py      |   4 +-
 src/openai/types/eval_list_response.py        |   4 +-
 .../types/eval_logs_data_source_config.py     |  32 +++
 src/openai/types/eval_retrieve_response.py    |   4 +-
 ...l_stored_completions_data_source_config.py |   4 +-
 src/openai/types/eval_update_response.py      |   4 +-
 src/openai/types/evals/__init__.py            |   4 +
 .../create_eval_responses_run_data_source.py  | 206 ++++++++++++++++
 ...te_eval_responses_run_data_source_param.py | 202 ++++++++++++++++
 src/openai/types/evals/run_cancel_response.py | 218 +----------------
 src/openai/types/evals/run_create_params.py   | 221 +-----------------
 src/openai/types/evals/run_create_response.py | 218 +----------------
 src/openai/types/evals/run_list_response.py   | 218 +----------------
 .../types/evals/run_retrieve_response.py      | 218 +----------------
 .../types/fine_tuning/fine_tuning_job.py      |   2 +-
 .../audio/test_transcriptions.py              |   4 +
 24 files changed, 645 insertions(+), 1097 deletions(-)
 create mode 100644 src/openai/types/eval_logs_data_source_config.py
 create mode 100644 src/openai/types/evals/create_eval_responses_run_data_source.py
 create mode 100644 src/openai/types/evals/create_eval_responses_run_data_source_param.py

diff --git a/.stats.yml b/.stats.yml
index 5f1bee851b..11ba2b0101 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
 configured_endpoints: 101
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-794a6ed3c3d3d77887564755168056af8a426b17cf1ec721e3a300503dc22a41.yml
-openapi_spec_hash: 25a81c220713cd5b0bafc221d1dfa79a
-config_hash: 0b768ed1b56c6d82816f0fa40dc4aaf5
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-161ca7f1cfd7b33c1fc07d0ce25dfe4be5a7271c394f4cb526b7fb21b0729900.yml
+openapi_spec_hash: 602e14add4bee018c6774e320ce309b8
+config_hash: 7da27f7260075e8813ddcea542fba1bf
diff --git a/api.md b/api.md
index 496e5548b3..db505b20d1 100644
--- a/api.md
+++ b/api.md
@@ -787,6 +787,7 @@ Types:
 ```python
 from openai.types import (
     EvalCustomDataSourceConfig,
+    EvalLogsDataSourceConfig,
     EvalStoredCompletionsDataSourceConfig,
     EvalCreateResponse,
     EvalRetrieveResponse,
@@ -812,6 +813,7 @@ Types:
 from openai.types.evals import (
     CreateEvalCompletionsRunDataSource,
     CreateEvalJSONLRunDataSource,
+    CreateEvalResponsesRunDataSource,
     EvalAPIError,
     RunCreateResponse,
     RunRetrieveResponse,
diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py
index 0c7ebca7a6..9d4f7e9255 100644
--- a/src/openai/resources/audio/transcriptions.py
+++ 
b/src/openai/resources/audio/transcriptions.py @@ -57,6 +57,7 @@ def create( *, file: FileTypes, model: Union[str, AudioModel], + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN, language: str | NotGiven = NOT_GIVEN, @@ -118,6 +119,7 @@ def create( file: FileTypes, model: Union[str, AudioModel], stream: Literal[True], + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, language: str | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, @@ -152,6 +154,11 @@ def create( Note: Streaming is not supported for the `whisper-1` model and will be ignored. + chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server + first normalizes loudness and then uses voice activity detection (VAD) to choose + boundaries. `server_vad` object can be provided to tweak VAD detection + parameters manually. If unset, the audio is transcribed as a single block. + include: Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with @@ -200,6 +207,7 @@ def create( file: FileTypes, model: Union[str, AudioModel], stream: bool, + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, language: str | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, @@ -234,6 +242,11 @@ def create( Note: Streaming is not supported for the `whisper-1` model and will be ignored. + chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server + first normalizes loudness and then uses voice activity detection (VAD) to choose + boundaries. `server_vad` object can be provided to tweak VAD detection + parameters manually. If unset, the audio is transcribed as a single block. + include: Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with @@ -281,6 +294,7 @@ def create( *, file: FileTypes, model: Union[str, AudioModel], + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, language: str | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, @@ -299,6 +313,7 @@ def create( { "file": file, "model": model, + "chunking_strategy": chunking_strategy, "include": include, "language": language, "prompt": prompt, @@ -357,6 +372,8 @@ async def create( *, file: FileTypes, model: Union[str, AudioModel], + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, + include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN, language: str | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, @@ -369,7 +386,68 @@ async def create( extra_query: Query | None = None, extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, - ) -> Transcription: ... 
+    ) -> TranscriptionCreateResponse:
+        """
+        Transcribes audio into the input language.
+
+        Args:
+          file:
+              The audio file object (not file name) to transcribe, in one of these formats:
+              flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+
+          model: ID of the model to use. The options are `gpt-4o-transcribe`,
+              `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
+              Whisper V2 model).
+
+          chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server
+              first normalizes loudness and then uses voice activity detection (VAD) to choose
+              boundaries. `server_vad` object can be provided to tweak VAD detection
+              parameters manually. If unset, the audio is transcribed as a single block.
+
+          include: Additional information to include in the transcription response. `logprobs` will
+              return the log probabilities of the tokens in the response to understand the
+              model's confidence in the transcription. `logprobs` only works with
+              response_format set to `json` and only with the models `gpt-4o-transcribe` and
+              `gpt-4o-mini-transcribe`.
+
+          language: The language of the input audio. Supplying the input language in
+              [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
+              format will improve accuracy and latency.
+
+          prompt: An optional text to guide the model's style or continue a previous audio
+              segment. The
+              [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
+              should match the audio language.
+
+          response_format: The format of the output, in one of these options: `json`, `text`, `srt`,
+              `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
+              the only supported format is `json`.
+
+          stream: If set to true, the model response data will be streamed to the client as it is
+              generated using
+              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).
+              See the
+              [Streaming section of the Speech-to-Text guide](https://platform.openai.com/docs/guides/speech-to-text?lang=curl#streaming-transcriptions)
+              for more information.
+
+              Note: Streaming is not supported for the `whisper-1` model and will be ignored.
+
+          temperature: The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
+              output more random, while lower values like 0.2 will make it more focused and
+              deterministic. If set to 0, the model will use
+              [log probability](https://en.wikipedia.org/wiki/Log_probability) to
+              automatically increase the temperature until certain thresholds are hit.
+
+          timestamp_granularities: The timestamp granularities to populate for this transcription.
+              `response_format` must be set to `verbose_json` to use timestamp granularities.
+              Either or both of these options are supported: `word` or `segment`. Note: There
+              is no additional latency for segment timestamps, but generating word timestamps
+              incurs additional latency.
+ + extra_headers: Send extra headers + + extra_query: Add additional query parameters to the request + """ @overload async def create( @@ -418,6 +496,7 @@ async def create( file: FileTypes, model: Union[str, AudioModel], stream: Literal[True], + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, language: str | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, @@ -452,6 +531,11 @@ async def create( Note: Streaming is not supported for the `whisper-1` model and will be ignored. + chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server + first normalizes loudness and then uses voice activity detection (VAD) to choose + boundaries. `server_vad` object can be provided to tweak VAD detection + parameters manually. If unset, the audio is transcribed as a single block. + include: Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with @@ -500,6 +584,7 @@ async def create( file: FileTypes, model: Union[str, AudioModel], stream: bool, + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, language: str | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, @@ -534,6 +619,11 @@ async def create( Note: Streaming is not supported for the `whisper-1` model and will be ignored. + chunking_strategy: Controls how the audio is cut into chunks. When set to `"auto"`, the server + first normalizes loudness and then uses voice activity detection (VAD) to choose + boundaries. `server_vad` object can be provided to tweak VAD detection + parameters manually. If unset, the audio is transcribed as a single block. + include: Additional information to include in the transcription response. `logprobs` will return the log probabilities of the tokens in the response to understand the model's confidence in the transcription. `logprobs` only works with @@ -581,6 +671,7 @@ async def create( *, file: FileTypes, model: Union[str, AudioModel], + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, language: str | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, @@ -599,6 +690,7 @@ async def create( { "file": file, "model": model, + "chunking_strategy": chunking_strategy, "include": include, "language": language, "prompt": prompt, diff --git a/src/openai/resources/embeddings.py b/src/openai/resources/embeddings.py index a392d5eb17..553dacc284 100644 --- a/src/openai/resources/embeddings.py +++ b/src/openai/resources/embeddings.py @@ -66,11 +66,12 @@ def create( input: Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for - `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + all embedding models), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. Some models may also impose a limit on total number of - tokens summed across inputs. 
+ for counting tokens. In addition to the per-input token limit, all embedding + models enforce a maximum of 300,000 tokens summed across all inputs in a single + request. model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to @@ -181,11 +182,12 @@ async def create( input: Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for - `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 + all embedding models), cannot be an empty string, and any array must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. Some models may also impose a limit on total number of - tokens summed across inputs. + for counting tokens. In addition to the per-input token limit, all embedding + models enforce a maximum of 300,000 tokens summed across all inputs in a single + request. model: ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index bf5493fd62..9f40033354 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -70,6 +70,7 @@ from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams from .moderation_text_input_param import ModerationTextInputParam as ModerationTextInputParam +from .eval_logs_data_source_config import EvalLogsDataSourceConfig as EvalLogsDataSourceConfig from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse from .websocket_connection_options import WebsocketConnectionOptions as WebsocketConnectionOptions diff --git a/src/openai/types/audio/transcription_create_params.py b/src/openai/types/audio/transcription_create_params.py index 0cda4c7907..8271b054ab 100644 --- a/src/openai/types/audio/transcription_create_params.py +++ b/src/openai/types/audio/transcription_create_params.py @@ -3,7 +3,7 @@ from __future__ import annotations from typing import List, Union, Optional -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..._types import FileTypes from ..audio_model import AudioModel @@ -12,6 +12,8 @@ __all__ = [ "TranscriptionCreateParamsBase", + "ChunkingStrategy", + "ChunkingStrategyVadConfig", "TranscriptionCreateParamsNonStreaming", "TranscriptionCreateParamsStreaming", ] @@ -31,6 +33,15 @@ class TranscriptionCreateParamsBase(TypedDict, total=False): (which is powered by our open source Whisper V2 model). """ + chunking_strategy: Optional[ChunkingStrategy] + """Controls how the audio is cut into chunks. + + When set to `"auto"`, the server first normalizes loudness and then uses voice + activity detection (VAD) to choose boundaries. `server_vad` object can be + provided to tweak VAD detection parameters manually. If unset, the audio is + transcribed as a single block. + """ + include: List[TranscriptionInclude] """Additional information to include in the transcription response. 
@@ -82,6 +93,31 @@ class TranscriptionCreateParamsBase(TypedDict, total=False): """ +class ChunkingStrategyVadConfig(TypedDict, total=False): + type: Required[Literal["server_vad"]] + """Must be set to `server_vad` to enable manual chunking using server side VAD.""" + + prefix_padding_ms: int + """Amount of audio to include before the VAD detected speech (in milliseconds).""" + + silence_duration_ms: int + """ + Duration of silence to detect speech stop (in milliseconds). With shorter values + the model will respond more quickly, but may jump in on short pauses from the + user. + """ + + threshold: float + """Sensitivity threshold (0.0 to 1.0) for voice activity detection. + + A higher threshold will require louder audio to activate the model, and thus + might perform better in noisy environments. + """ + + +ChunkingStrategy: TypeAlias = Union[Literal["auto"], ChunkingStrategyVadConfig] + + class TranscriptionCreateParamsNonStreaming(TranscriptionCreateParamsBase, total=False): stream: Optional[Literal[False]] """ diff --git a/src/openai/types/embedding_create_params.py b/src/openai/types/embedding_create_params.py index a90566449b..94edce10a4 100644 --- a/src/openai/types/embedding_create_params.py +++ b/src/openai/types/embedding_create_params.py @@ -16,11 +16,12 @@ class EmbeddingCreateParams(TypedDict, total=False): To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model - (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any - array must be 2048 dimensions or less. + (8192 tokens for all embedding models), cannot be an empty string, and any array + must be 2048 dimensions or less. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) - for counting tokens. Some models may also impose a limit on total number of - tokens summed across inputs. + for counting tokens. In addition to the per-input token limit, all embedding + models enforce a maximum of 300,000 tokens summed across all inputs in a single + request. """ model: Required[Union[str, EmbeddingModel]] diff --git a/src/openai/types/eval_create_params.py b/src/openai/types/eval_create_params.py index 3b712580a0..8d508a2d8e 100644 --- a/src/openai/types/eval_create_params.py +++ b/src/openai/types/eval_create_params.py @@ -16,6 +16,7 @@ "EvalCreateParams", "DataSourceConfig", "DataSourceConfigCustom", + "DataSourceConfigLogs", "DataSourceConfigStoredCompletions", "TestingCriterion", "TestingCriterionLabelModel", @@ -65,15 +66,23 @@ class DataSourceConfigCustom(TypedDict, total=False): """ +class DataSourceConfigLogs(TypedDict, total=False): + type: Required[Literal["logs"]] + """The type of data source. Always `logs`.""" + + metadata: Dict[str, object] + """Metadata filters for the logs data source.""" + + class DataSourceConfigStoredCompletions(TypedDict, total=False): - type: Required[Literal["stored_completions"]] - """The type of data source. Always `stored_completions`.""" + type: Required[Literal["stored-completions"]] + """The type of data source. 
Always `stored-completions`.""" metadata: Dict[str, object] """Metadata filters for the stored completions data source.""" -DataSourceConfig: TypeAlias = Union[DataSourceConfigCustom, DataSourceConfigStoredCompletions] +DataSourceConfig: TypeAlias = Union[DataSourceConfigCustom, DataSourceConfigLogs, DataSourceConfigStoredCompletions] class TestingCriterionLabelModelInputSimpleInputMessage(TypedDict, total=False): diff --git a/src/openai/types/eval_create_response.py b/src/openai/types/eval_create_response.py index d5f158ad29..2bf7643b53 100644 --- a/src/openai/types/eval_create_response.py +++ b/src/openai/types/eval_create_response.py @@ -10,6 +10,7 @@ from .graders.label_model_grader import LabelModelGrader from .graders.score_model_grader import ScoreModelGrader from .graders.string_check_grader import StringCheckGrader +from .eval_logs_data_source_config import EvalLogsDataSourceConfig from .eval_custom_data_source_config import EvalCustomDataSourceConfig from .graders.text_similarity_grader import TextSimilarityGrader from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig @@ -24,7 +25,8 @@ ] DataSourceConfig: TypeAlias = Annotated[ - Union[EvalCustomDataSourceConfig, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type") + Union[EvalCustomDataSourceConfig, EvalLogsDataSourceConfig, EvalStoredCompletionsDataSourceConfig], + PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/eval_list_response.py b/src/openai/types/eval_list_response.py index b743f57f6a..e52f3db1c4 100644 --- a/src/openai/types/eval_list_response.py +++ b/src/openai/types/eval_list_response.py @@ -10,6 +10,7 @@ from .graders.label_model_grader import LabelModelGrader from .graders.score_model_grader import ScoreModelGrader from .graders.string_check_grader import StringCheckGrader +from .eval_logs_data_source_config import EvalLogsDataSourceConfig from .eval_custom_data_source_config import EvalCustomDataSourceConfig from .graders.text_similarity_grader import TextSimilarityGrader from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig @@ -24,7 +25,8 @@ ] DataSourceConfig: TypeAlias = Annotated[ - Union[EvalCustomDataSourceConfig, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type") + Union[EvalCustomDataSourceConfig, EvalLogsDataSourceConfig, EvalStoredCompletionsDataSourceConfig], + PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/eval_logs_data_source_config.py b/src/openai/types/eval_logs_data_source_config.py new file mode 100644 index 0000000000..a3eb245e07 --- /dev/null +++ b/src/openai/types/eval_logs_data_source_config.py @@ -0,0 +1,32 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, Optional +from typing_extensions import Literal + +from pydantic import Field as FieldInfo + +from .._models import BaseModel +from .shared.metadata import Metadata + +__all__ = ["EvalLogsDataSourceConfig"] + + +class EvalLogsDataSourceConfig(BaseModel): + schema_: Dict[str, object] = FieldInfo(alias="schema") + """ + The json schema for the run data source items. Learn how to build JSON schemas + [here](https://json-schema.org/). + """ + + type: Literal["logs"] + """The type of data source. Always `logs`.""" + + metadata: Optional[Metadata] = None + """Set of 16 key-value pairs that can be attached to an object. 
+ + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ diff --git a/src/openai/types/eval_retrieve_response.py b/src/openai/types/eval_retrieve_response.py index dabb20674e..71ed96d5ab 100644 --- a/src/openai/types/eval_retrieve_response.py +++ b/src/openai/types/eval_retrieve_response.py @@ -10,6 +10,7 @@ from .graders.label_model_grader import LabelModelGrader from .graders.score_model_grader import ScoreModelGrader from .graders.string_check_grader import StringCheckGrader +from .eval_logs_data_source_config import EvalLogsDataSourceConfig from .eval_custom_data_source_config import EvalCustomDataSourceConfig from .graders.text_similarity_grader import TextSimilarityGrader from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig @@ -24,7 +25,8 @@ ] DataSourceConfig: TypeAlias = Annotated[ - Union[EvalCustomDataSourceConfig, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type") + Union[EvalCustomDataSourceConfig, EvalLogsDataSourceConfig, EvalStoredCompletionsDataSourceConfig], + PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/eval_stored_completions_data_source_config.py b/src/openai/types/eval_stored_completions_data_source_config.py index 98f86a4719..5016f0ae9c 100644 --- a/src/openai/types/eval_stored_completions_data_source_config.py +++ b/src/openai/types/eval_stored_completions_data_source_config.py @@ -18,8 +18,8 @@ class EvalStoredCompletionsDataSourceConfig(BaseModel): [here](https://json-schema.org/). """ - type: Literal["stored_completions"] - """The type of data source. Always `stored_completions`.""" + type: Literal["stored-completions"] + """The type of data source. Always `stored-completions`.""" metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. 
diff --git a/src/openai/types/eval_update_response.py b/src/openai/types/eval_update_response.py index c5cb2622ea..73ee6eb58c 100644 --- a/src/openai/types/eval_update_response.py +++ b/src/openai/types/eval_update_response.py @@ -10,6 +10,7 @@ from .graders.label_model_grader import LabelModelGrader from .graders.score_model_grader import ScoreModelGrader from .graders.string_check_grader import StringCheckGrader +from .eval_logs_data_source_config import EvalLogsDataSourceConfig from .eval_custom_data_source_config import EvalCustomDataSourceConfig from .graders.text_similarity_grader import TextSimilarityGrader from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig @@ -24,7 +25,8 @@ ] DataSourceConfig: TypeAlias = Annotated[ - Union[EvalCustomDataSourceConfig, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type") + Union[EvalCustomDataSourceConfig, EvalLogsDataSourceConfig, EvalStoredCompletionsDataSourceConfig], + PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/evals/__init__.py b/src/openai/types/evals/__init__.py index ebf84c6b8d..9d26c7d915 100644 --- a/src/openai/types/evals/__init__.py +++ b/src/openai/types/evals/__init__.py @@ -11,12 +11,16 @@ from .run_delete_response import RunDeleteResponse as RunDeleteResponse from .run_retrieve_response import RunRetrieveResponse as RunRetrieveResponse from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource +from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource from .create_eval_completions_run_data_source import ( CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, ) from .create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam as CreateEvalJSONLRunDataSourceParam, ) +from .create_eval_responses_run_data_source_param import ( + CreateEvalResponsesRunDataSourceParam as CreateEvalResponsesRunDataSourceParam, +) from .create_eval_completions_run_data_source_param import ( CreateEvalCompletionsRunDataSourceParam as CreateEvalCompletionsRunDataSourceParam, ) diff --git a/src/openai/types/evals/create_eval_responses_run_data_source.py b/src/openai/types/evals/create_eval_responses_run_data_source.py new file mode 100644 index 0000000000..481fd0761e --- /dev/null +++ b/src/openai/types/evals/create_eval_responses_run_data_source.py @@ -0,0 +1,206 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from typing import Dict, List, Union, Optional +from typing_extensions import Literal, Annotated, TypeAlias + +from ..._utils import PropertyInfo +from ..._models import BaseModel +from ..shared.reasoning_effort import ReasoningEffort +from ..responses.response_input_text import ResponseInputText + +__all__ = [ + "CreateEvalResponsesRunDataSource", + "Source", + "SourceFileContent", + "SourceFileContentContent", + "SourceFileID", + "SourceResponses", + "InputMessages", + "InputMessagesTemplate", + "InputMessagesTemplateTemplate", + "InputMessagesTemplateTemplateChatMessage", + "InputMessagesTemplateTemplateEvalItem", + "InputMessagesTemplateTemplateEvalItemContent", + "InputMessagesTemplateTemplateEvalItemContentOutputText", + "InputMessagesItemReference", + "SamplingParams", +] + + +class SourceFileContentContent(BaseModel): + item: Dict[str, object] + + sample: Optional[Dict[str, object]] = None + + +class SourceFileContent(BaseModel): + content: List[SourceFileContentContent] + """The content of the jsonl file.""" + + type: Literal["file_content"] + """The type of jsonl source. Always `file_content`.""" + + +class SourceFileID(BaseModel): + id: str + """The identifier of the file.""" + + type: Literal["file_id"] + """The type of jsonl source. Always `file_id`.""" + + +class SourceResponses(BaseModel): + type: Literal["responses"] + """The type of run data source. Always `responses`.""" + + created_after: Optional[int] = None + """Only include items created after this timestamp (inclusive). + + This is a query parameter used to select responses. + """ + + created_before: Optional[int] = None + """Only include items created before this timestamp (inclusive). + + This is a query parameter used to select responses. + """ + + has_tool_calls: Optional[bool] = None + """Whether the response has tool calls. + + This is a query parameter used to select responses. + """ + + instructions_search: Optional[str] = None + """Optional string to search the 'instructions' field. + + This is a query parameter used to select responses. + """ + + metadata: Optional[object] = None + """Metadata filter for the responses. + + This is a query parameter used to select responses. + """ + + model: Optional[str] = None + """The name of the model to find responses for. + + This is a query parameter used to select responses. + """ + + reasoning_effort: Optional[ReasoningEffort] = None + """Optional reasoning effort parameter. + + This is a query parameter used to select responses. + """ + + temperature: Optional[float] = None + """Sampling temperature. This is a query parameter used to select responses.""" + + tools: Optional[List[str]] = None + """List of tool names. This is a query parameter used to select responses.""" + + top_p: Optional[float] = None + """Nucleus sampling parameter. This is a query parameter used to select responses.""" + + users: Optional[List[str]] = None + """List of user identifiers. This is a query parameter used to select responses.""" + + +Source: TypeAlias = Annotated[ + Union[SourceFileContent, SourceFileID, SourceResponses], PropertyInfo(discriminator="type") +] + + +class InputMessagesTemplateTemplateChatMessage(BaseModel): + content: str + """The content of the message.""" + + role: str + """The role of the message (e.g. "system", "assistant", "user").""" + + +class InputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): + text: str + """The text output from the model.""" + + type: Literal["output_text"] + """The type of the output text. 
Always `output_text`.""" + + +InputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ + str, ResponseInputText, InputMessagesTemplateTemplateEvalItemContentOutputText +] + + +class InputMessagesTemplateTemplateEvalItem(BaseModel): + content: InputMessagesTemplateTemplateEvalItemContent + """Text inputs to the model - can contain template strings.""" + + role: Literal["user", "assistant", "system", "developer"] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. + """ + + type: Optional[Literal["message"]] = None + """The type of the message input. Always `message`.""" + + +InputMessagesTemplateTemplate: TypeAlias = Union[ + InputMessagesTemplateTemplateChatMessage, InputMessagesTemplateTemplateEvalItem +] + + +class InputMessagesTemplate(BaseModel): + template: List[InputMessagesTemplateTemplate] + """A list of chat messages forming the prompt or context. + + May include variable references to the "item" namespace, ie {{item.name}}. + """ + + type: Literal["template"] + """The type of input messages. Always `template`.""" + + +class InputMessagesItemReference(BaseModel): + item_reference: str + """A reference to a variable in the "item" namespace. Ie, "item.name" """ + + type: Literal["item_reference"] + """The type of input messages. Always `item_reference`.""" + + +InputMessages: TypeAlias = Annotated[ + Union[InputMessagesTemplate, InputMessagesItemReference], PropertyInfo(discriminator="type") +] + + +class SamplingParams(BaseModel): + max_completion_tokens: Optional[int] = None + """The maximum number of tokens in the generated output.""" + + seed: Optional[int] = None + """A seed value to initialize the randomness, during sampling.""" + + temperature: Optional[float] = None + """A higher temperature increases randomness in the outputs.""" + + top_p: Optional[float] = None + """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" + + +class CreateEvalResponsesRunDataSource(BaseModel): + source: Source + """A EvalResponsesSource object describing a run data source configuration.""" + + type: Literal["responses"] + """The type of run data source. Always `responses`.""" + + input_messages: Optional[InputMessages] = None + + model: Optional[str] = None + """The name of the model to use for generating completions (e.g. "o3-mini").""" + + sampling_params: Optional[SamplingParams] = None diff --git a/src/openai/types/evals/create_eval_responses_run_data_source_param.py b/src/openai/types/evals/create_eval_responses_run_data_source_param.py new file mode 100644 index 0000000000..9cde20de20 --- /dev/null +++ b/src/openai/types/evals/create_eval_responses_run_data_source_param.py @@ -0,0 +1,202 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from ..shared.reasoning_effort import ReasoningEffort +from ..responses.response_input_text_param import ResponseInputTextParam + +__all__ = [ + "CreateEvalResponsesRunDataSourceParam", + "Source", + "SourceFileContent", + "SourceFileContentContent", + "SourceFileID", + "SourceResponses", + "InputMessages", + "InputMessagesTemplate", + "InputMessagesTemplateTemplate", + "InputMessagesTemplateTemplateChatMessage", + "InputMessagesTemplateTemplateEvalItem", + "InputMessagesTemplateTemplateEvalItemContent", + "InputMessagesTemplateTemplateEvalItemContentOutputText", + "InputMessagesItemReference", + "SamplingParams", +] + + +class SourceFileContentContent(TypedDict, total=False): + item: Required[Dict[str, object]] + + sample: Dict[str, object] + + +class SourceFileContent(TypedDict, total=False): + content: Required[Iterable[SourceFileContentContent]] + """The content of the jsonl file.""" + + type: Required[Literal["file_content"]] + """The type of jsonl source. Always `file_content`.""" + + +class SourceFileID(TypedDict, total=False): + id: Required[str] + """The identifier of the file.""" + + type: Required[Literal["file_id"]] + """The type of jsonl source. Always `file_id`.""" + + +class SourceResponses(TypedDict, total=False): + type: Required[Literal["responses"]] + """The type of run data source. Always `responses`.""" + + created_after: Optional[int] + """Only include items created after this timestamp (inclusive). + + This is a query parameter used to select responses. + """ + + created_before: Optional[int] + """Only include items created before this timestamp (inclusive). + + This is a query parameter used to select responses. + """ + + has_tool_calls: Optional[bool] + """Whether the response has tool calls. + + This is a query parameter used to select responses. + """ + + instructions_search: Optional[str] + """Optional string to search the 'instructions' field. + + This is a query parameter used to select responses. + """ + + metadata: Optional[object] + """Metadata filter for the responses. + + This is a query parameter used to select responses. + """ + + model: Optional[str] + """The name of the model to find responses for. + + This is a query parameter used to select responses. + """ + + reasoning_effort: Optional[ReasoningEffort] + """Optional reasoning effort parameter. + + This is a query parameter used to select responses. + """ + + temperature: Optional[float] + """Sampling temperature. This is a query parameter used to select responses.""" + + tools: Optional[List[str]] + """List of tool names. This is a query parameter used to select responses.""" + + top_p: Optional[float] + """Nucleus sampling parameter. This is a query parameter used to select responses.""" + + users: Optional[List[str]] + """List of user identifiers. This is a query parameter used to select responses.""" + + +Source: TypeAlias = Union[SourceFileContent, SourceFileID, SourceResponses] + + +class InputMessagesTemplateTemplateChatMessage(TypedDict, total=False): + content: Required[str] + """The content of the message.""" + + role: Required[str] + """The role of the message (e.g. 
"system", "assistant", "user").""" + + +class InputMessagesTemplateTemplateEvalItemContentOutputText(TypedDict, total=False): + text: Required[str] + """The text output from the model.""" + + type: Required[Literal["output_text"]] + """The type of the output text. Always `output_text`.""" + + +InputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ + str, ResponseInputTextParam, InputMessagesTemplateTemplateEvalItemContentOutputText +] + + +class InputMessagesTemplateTemplateEvalItem(TypedDict, total=False): + content: Required[InputMessagesTemplateTemplateEvalItemContent] + """Text inputs to the model - can contain template strings.""" + + role: Required[Literal["user", "assistant", "system", "developer"]] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. + """ + + type: Literal["message"] + """The type of the message input. Always `message`.""" + + +InputMessagesTemplateTemplate: TypeAlias = Union[ + InputMessagesTemplateTemplateChatMessage, InputMessagesTemplateTemplateEvalItem +] + + +class InputMessagesTemplate(TypedDict, total=False): + template: Required[Iterable[InputMessagesTemplateTemplate]] + """A list of chat messages forming the prompt or context. + + May include variable references to the "item" namespace, ie {{item.name}}. + """ + + type: Required[Literal["template"]] + """The type of input messages. Always `template`.""" + + +class InputMessagesItemReference(TypedDict, total=False): + item_reference: Required[str] + """A reference to a variable in the "item" namespace. Ie, "item.name" """ + + type: Required[Literal["item_reference"]] + """The type of input messages. Always `item_reference`.""" + + +InputMessages: TypeAlias = Union[InputMessagesTemplate, InputMessagesItemReference] + + +class SamplingParams(TypedDict, total=False): + max_completion_tokens: int + """The maximum number of tokens in the generated output.""" + + seed: int + """A seed value to initialize the randomness, during sampling.""" + + temperature: float + """A higher temperature increases randomness in the outputs.""" + + top_p: float + """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" + + +class CreateEvalResponsesRunDataSourceParam(TypedDict, total=False): + source: Required[Source] + """A EvalResponsesSource object describing a run data source configuration.""" + + type: Required[Literal["responses"]] + """The type of run data source. Always `responses`.""" + + input_messages: InputMessages + + model: str + """The name of the model to use for generating completions (e.g. "o3-mini").""" + + sampling_params: SamplingParams diff --git a/src/openai/types/evals/run_cancel_response.py b/src/openai/types/evals/run_cancel_response.py index eb6d689fc3..a49989b60f 100644 --- a/src/openai/types/evals/run_cancel_response.py +++ b/src/openai/types/evals/run_cancel_response.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import Dict, List, Union, Optional +from typing import List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from pydantic import Field as FieldInfo @@ -9,224 +9,14 @@ from ..._models import BaseModel from .eval_api_error import EvalAPIError from ..shared.metadata import Metadata -from ..shared.reasoning_effort import ReasoningEffort -from ..responses.response_input_text import ResponseInputText from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource +from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource -__all__ = [ - "RunCancelResponse", - "DataSource", - "DataSourceCompletions", - "DataSourceCompletionsSource", - "DataSourceCompletionsSourceFileContent", - "DataSourceCompletionsSourceFileContentContent", - "DataSourceCompletionsSourceFileID", - "DataSourceCompletionsSourceResponses", - "DataSourceCompletionsInputMessages", - "DataSourceCompletionsInputMessagesTemplate", - "DataSourceCompletionsInputMessagesTemplateTemplate", - "DataSourceCompletionsInputMessagesTemplateTemplateChatMessage", - "DataSourceCompletionsInputMessagesTemplateTemplateEvalItem", - "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent", - "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText", - "DataSourceCompletionsInputMessagesItemReference", - "DataSourceCompletionsSamplingParams", - "PerModelUsage", - "PerTestingCriteriaResult", - "ResultCounts", -] - - -class DataSourceCompletionsSourceFileContentContent(BaseModel): - item: Dict[str, object] - - sample: Optional[Dict[str, object]] = None - - -class DataSourceCompletionsSourceFileContent(BaseModel): - content: List[DataSourceCompletionsSourceFileContentContent] - """The content of the jsonl file.""" - - type: Literal["file_content"] - """The type of jsonl source. Always `file_content`.""" - - -class DataSourceCompletionsSourceFileID(BaseModel): - id: str - """The identifier of the file.""" - - type: Literal["file_id"] - """The type of jsonl source. Always `file_id`.""" - - -class DataSourceCompletionsSourceResponses(BaseModel): - type: Literal["responses"] - """The type of run data source. Always `responses`.""" - - allow_parallel_tool_calls: Optional[bool] = None - """Whether to allow parallel tool calls. - - This is a query parameter used to select responses. - """ - - created_after: Optional[int] = None - """Only include items created after this timestamp (inclusive). - - This is a query parameter used to select responses. - """ - - created_before: Optional[int] = None - """Only include items created before this timestamp (inclusive). - - This is a query parameter used to select responses. - """ - - has_tool_calls: Optional[bool] = None - """Whether the response has tool calls. - - This is a query parameter used to select responses. - """ - - instructions_search: Optional[str] = None - """Optional search string for instructions. - - This is a query parameter used to select responses. - """ - - metadata: Optional[object] = None - """Metadata filter for the responses. - - This is a query parameter used to select responses. - """ - - model: Optional[str] = None - """The name of the model to find responses for. - - This is a query parameter used to select responses. - """ - - reasoning_effort: Optional[ReasoningEffort] = None - """Optional reasoning effort parameter. - - This is a query parameter used to select responses. 
- """ - - temperature: Optional[float] = None - """Sampling temperature. This is a query parameter used to select responses.""" - - top_p: Optional[float] = None - """Nucleus sampling parameter. This is a query parameter used to select responses.""" - - users: Optional[List[str]] = None - """List of user identifiers. This is a query parameter used to select responses.""" - - -DataSourceCompletionsSource: TypeAlias = Annotated[ - Union[ - DataSourceCompletionsSourceFileContent, DataSourceCompletionsSourceFileID, DataSourceCompletionsSourceResponses - ], - PropertyInfo(discriminator="type"), -] - - -class DataSourceCompletionsInputMessagesTemplateTemplateChatMessage(BaseModel): - content: str - """The content of the message.""" - - role: str - """The role of the message (e.g. "system", "assistant", "user").""" - - -class DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): - text: str - """The text output from the model.""" - - type: Literal["output_text"] - """The type of the output text. Always `output_text`.""" - - -DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText -] - - -class DataSourceCompletionsInputMessagesTemplateTemplateEvalItem(BaseModel): - content: DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" - - role: Literal["user", "assistant", "system", "developer"] - """The role of the message input. - - One of `user`, `assistant`, `system`, or `developer`. - """ - - type: Optional[Literal["message"]] = None - """The type of the message input. Always `message`.""" - - -DataSourceCompletionsInputMessagesTemplateTemplate: TypeAlias = Union[ - DataSourceCompletionsInputMessagesTemplateTemplateChatMessage, - DataSourceCompletionsInputMessagesTemplateTemplateEvalItem, -] - - -class DataSourceCompletionsInputMessagesTemplate(BaseModel): - template: List[DataSourceCompletionsInputMessagesTemplateTemplate] - """A list of chat messages forming the prompt or context. - - May include variable references to the "item" namespace, ie {{item.name}}. - """ - - type: Literal["template"] - """The type of input messages. Always `template`.""" - - -class DataSourceCompletionsInputMessagesItemReference(BaseModel): - item_reference: str - """A reference to a variable in the "item" namespace. Ie, "item.name" """ - - type: Literal["item_reference"] - """The type of input messages. Always `item_reference`.""" - - -DataSourceCompletionsInputMessages: TypeAlias = Annotated[ - Union[DataSourceCompletionsInputMessagesTemplate, DataSourceCompletionsInputMessagesItemReference], - PropertyInfo(discriminator="type"), -] - - -class DataSourceCompletionsSamplingParams(BaseModel): - max_completion_tokens: Optional[int] = None - """The maximum number of tokens in the generated output.""" - - seed: Optional[int] = None - """A seed value to initialize the randomness, during sampling.""" - - temperature: Optional[float] = None - """A higher temperature increases randomness in the outputs.""" - - top_p: Optional[float] = None - """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" - - -class DataSourceCompletions(BaseModel): - source: DataSourceCompletionsSource - """A EvalResponsesSource object describing a run data source configuration.""" - - type: Literal["completions"] - """The type of run data source. 
Always `completions`.""" - - input_messages: Optional[DataSourceCompletionsInputMessages] = None - - model: Optional[str] = None - """The name of the model to use for generating completions (e.g. "o3-mini").""" - - sampling_params: Optional[DataSourceCompletionsSamplingParams] = None - +__all__ = ["RunCancelResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"] DataSource: TypeAlias = Annotated[ - Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceCompletions], + Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, CreateEvalResponsesRunDataSource], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/evals/run_create_params.py b/src/openai/types/evals/run_create_params.py index 0c9720ea7a..00c7398748 100644 --- a/src/openai/types/evals/run_create_params.py +++ b/src/openai/types/evals/run_create_params.py @@ -2,34 +2,15 @@ from __future__ import annotations -from typing import Dict, List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict +from typing import Union, Optional +from typing_extensions import Required, TypeAlias, TypedDict from ..shared_params.metadata import Metadata -from ..shared.reasoning_effort import ReasoningEffort -from ..responses.response_input_text_param import ResponseInputTextParam from .create_eval_jsonl_run_data_source_param import CreateEvalJSONLRunDataSourceParam +from .create_eval_responses_run_data_source_param import CreateEvalResponsesRunDataSourceParam from .create_eval_completions_run_data_source_param import CreateEvalCompletionsRunDataSourceParam -__all__ = [ - "RunCreateParams", - "DataSource", - "DataSourceCreateEvalResponsesRunDataSource", - "DataSourceCreateEvalResponsesRunDataSourceSource", - "DataSourceCreateEvalResponsesRunDataSourceSourceFileContent", - "DataSourceCreateEvalResponsesRunDataSourceSourceFileContentContent", - "DataSourceCreateEvalResponsesRunDataSourceSourceFileID", - "DataSourceCreateEvalResponsesRunDataSourceSourceResponses", - "DataSourceCreateEvalResponsesRunDataSourceInputMessages", - "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate", - "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplate", - "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateChatMessage", - "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem", - "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent", - "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText", - "DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference", - "DataSourceCreateEvalResponsesRunDataSourceSamplingParams", -] +__all__ = ["RunCreateParams", "DataSource"] class RunCreateParams(TypedDict, total=False): @@ -50,198 +31,6 @@ class RunCreateParams(TypedDict, total=False): """The name of the run.""" -class DataSourceCreateEvalResponsesRunDataSourceSourceFileContentContent(TypedDict, total=False): - item: Required[Dict[str, object]] - - sample: Dict[str, object] - - -class DataSourceCreateEvalResponsesRunDataSourceSourceFileContent(TypedDict, total=False): - content: Required[Iterable[DataSourceCreateEvalResponsesRunDataSourceSourceFileContentContent]] - """The content of the jsonl file.""" - - type: Required[Literal["file_content"]] - """The type of jsonl source. 
Always `file_content`.""" - - -class DataSourceCreateEvalResponsesRunDataSourceSourceFileID(TypedDict, total=False): - id: Required[str] - """The identifier of the file.""" - - type: Required[Literal["file_id"]] - """The type of jsonl source. Always `file_id`.""" - - -class DataSourceCreateEvalResponsesRunDataSourceSourceResponses(TypedDict, total=False): - type: Required[Literal["responses"]] - """The type of run data source. Always `responses`.""" - - allow_parallel_tool_calls: Optional[bool] - """Whether to allow parallel tool calls. - - This is a query parameter used to select responses. - """ - - created_after: Optional[int] - """Only include items created after this timestamp (inclusive). - - This is a query parameter used to select responses. - """ - - created_before: Optional[int] - """Only include items created before this timestamp (inclusive). - - This is a query parameter used to select responses. - """ - - has_tool_calls: Optional[bool] - """Whether the response has tool calls. - - This is a query parameter used to select responses. - """ - - instructions_search: Optional[str] - """Optional search string for instructions. - - This is a query parameter used to select responses. - """ - - metadata: Optional[object] - """Metadata filter for the responses. - - This is a query parameter used to select responses. - """ - - model: Optional[str] - """The name of the model to find responses for. - - This is a query parameter used to select responses. - """ - - reasoning_effort: Optional[ReasoningEffort] - """Optional reasoning effort parameter. - - This is a query parameter used to select responses. - """ - - temperature: Optional[float] - """Sampling temperature. This is a query parameter used to select responses.""" - - top_p: Optional[float] - """Nucleus sampling parameter. This is a query parameter used to select responses.""" - - users: Optional[List[str]] - """List of user identifiers. This is a query parameter used to select responses.""" - - -DataSourceCreateEvalResponsesRunDataSourceSource: TypeAlias = Union[ - DataSourceCreateEvalResponsesRunDataSourceSourceFileContent, - DataSourceCreateEvalResponsesRunDataSourceSourceFileID, - DataSourceCreateEvalResponsesRunDataSourceSourceResponses, -] - - -class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateChatMessage(TypedDict, total=False): - content: Required[str] - """The content of the message.""" - - role: Required[str] - """The role of the message (e.g. "system", "assistant", "user").""" - - -class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText( - TypedDict, total=False -): - text: Required[str] - """The text output from the model.""" - - type: Required[Literal["output_text"]] - """The type of the output text. Always `output_text`.""" - - -DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, - ResponseInputTextParam, - DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText, -] - - -class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem(TypedDict, total=False): - content: Required[DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent] - """Text inputs to the model - can contain template strings.""" - - role: Required[Literal["user", "assistant", "system", "developer"]] - """The role of the message input. - - One of `user`, `assistant`, `system`, or `developer`. 
- """ - - type: Literal["message"] - """The type of the message input. Always `message`.""" - - -DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplate: TypeAlias = Union[ - DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateChatMessage, - DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem, -] - - -class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate(TypedDict, total=False): - template: Required[Iterable[DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplate]] - """A list of chat messages forming the prompt or context. - - May include variable references to the "item" namespace, ie {{item.name}}. - """ - - type: Required[Literal["template"]] - """The type of input messages. Always `template`.""" - - -class DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference(TypedDict, total=False): - item_reference: Required[str] - """A reference to a variable in the "item" namespace. Ie, "item.name" """ - - type: Required[Literal["item_reference"]] - """The type of input messages. Always `item_reference`.""" - - -DataSourceCreateEvalResponsesRunDataSourceInputMessages: TypeAlias = Union[ - DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate, - DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference, -] - - -class DataSourceCreateEvalResponsesRunDataSourceSamplingParams(TypedDict, total=False): - max_completion_tokens: int - """The maximum number of tokens in the generated output.""" - - seed: int - """A seed value to initialize the randomness, during sampling.""" - - temperature: float - """A higher temperature increases randomness in the outputs.""" - - top_p: float - """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" - - -class DataSourceCreateEvalResponsesRunDataSource(TypedDict, total=False): - source: Required[DataSourceCreateEvalResponsesRunDataSourceSource] - """A EvalResponsesSource object describing a run data source configuration.""" - - type: Required[Literal["completions"]] - """The type of run data source. Always `completions`.""" - - input_messages: DataSourceCreateEvalResponsesRunDataSourceInputMessages - - model: str - """The name of the model to use for generating completions (e.g. "o3-mini").""" - - sampling_params: DataSourceCreateEvalResponsesRunDataSourceSamplingParams - - DataSource: TypeAlias = Union[ - CreateEvalJSONLRunDataSourceParam, - CreateEvalCompletionsRunDataSourceParam, - DataSourceCreateEvalResponsesRunDataSource, + CreateEvalJSONLRunDataSourceParam, CreateEvalCompletionsRunDataSourceParam, CreateEvalResponsesRunDataSourceParam ] diff --git a/src/openai/types/evals/run_create_response.py b/src/openai/types/evals/run_create_response.py index 459399511c..8dc64cf895 100644 --- a/src/openai/types/evals/run_create_response.py +++ b/src/openai/types/evals/run_create_response.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import Dict, List, Union, Optional +from typing import List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from pydantic import Field as FieldInfo @@ -9,224 +9,14 @@ from ..._models import BaseModel from .eval_api_error import EvalAPIError from ..shared.metadata import Metadata -from ..shared.reasoning_effort import ReasoningEffort -from ..responses.response_input_text import ResponseInputText from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource +from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource -__all__ = [ - "RunCreateResponse", - "DataSource", - "DataSourceCompletions", - "DataSourceCompletionsSource", - "DataSourceCompletionsSourceFileContent", - "DataSourceCompletionsSourceFileContentContent", - "DataSourceCompletionsSourceFileID", - "DataSourceCompletionsSourceResponses", - "DataSourceCompletionsInputMessages", - "DataSourceCompletionsInputMessagesTemplate", - "DataSourceCompletionsInputMessagesTemplateTemplate", - "DataSourceCompletionsInputMessagesTemplateTemplateChatMessage", - "DataSourceCompletionsInputMessagesTemplateTemplateEvalItem", - "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent", - "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText", - "DataSourceCompletionsInputMessagesItemReference", - "DataSourceCompletionsSamplingParams", - "PerModelUsage", - "PerTestingCriteriaResult", - "ResultCounts", -] - - -class DataSourceCompletionsSourceFileContentContent(BaseModel): - item: Dict[str, object] - - sample: Optional[Dict[str, object]] = None - - -class DataSourceCompletionsSourceFileContent(BaseModel): - content: List[DataSourceCompletionsSourceFileContentContent] - """The content of the jsonl file.""" - - type: Literal["file_content"] - """The type of jsonl source. Always `file_content`.""" - - -class DataSourceCompletionsSourceFileID(BaseModel): - id: str - """The identifier of the file.""" - - type: Literal["file_id"] - """The type of jsonl source. Always `file_id`.""" - - -class DataSourceCompletionsSourceResponses(BaseModel): - type: Literal["responses"] - """The type of run data source. Always `responses`.""" - - allow_parallel_tool_calls: Optional[bool] = None - """Whether to allow parallel tool calls. - - This is a query parameter used to select responses. - """ - - created_after: Optional[int] = None - """Only include items created after this timestamp (inclusive). - - This is a query parameter used to select responses. - """ - - created_before: Optional[int] = None - """Only include items created before this timestamp (inclusive). - - This is a query parameter used to select responses. - """ - - has_tool_calls: Optional[bool] = None - """Whether the response has tool calls. - - This is a query parameter used to select responses. - """ - - instructions_search: Optional[str] = None - """Optional search string for instructions. - - This is a query parameter used to select responses. - """ - - metadata: Optional[object] = None - """Metadata filter for the responses. - - This is a query parameter used to select responses. - """ - - model: Optional[str] = None - """The name of the model to find responses for. - - This is a query parameter used to select responses. - """ - - reasoning_effort: Optional[ReasoningEffort] = None - """Optional reasoning effort parameter. - - This is a query parameter used to select responses. 
- """ - - temperature: Optional[float] = None - """Sampling temperature. This is a query parameter used to select responses.""" - - top_p: Optional[float] = None - """Nucleus sampling parameter. This is a query parameter used to select responses.""" - - users: Optional[List[str]] = None - """List of user identifiers. This is a query parameter used to select responses.""" - - -DataSourceCompletionsSource: TypeAlias = Annotated[ - Union[ - DataSourceCompletionsSourceFileContent, DataSourceCompletionsSourceFileID, DataSourceCompletionsSourceResponses - ], - PropertyInfo(discriminator="type"), -] - - -class DataSourceCompletionsInputMessagesTemplateTemplateChatMessage(BaseModel): - content: str - """The content of the message.""" - - role: str - """The role of the message (e.g. "system", "assistant", "user").""" - - -class DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): - text: str - """The text output from the model.""" - - type: Literal["output_text"] - """The type of the output text. Always `output_text`.""" - - -DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText -] - - -class DataSourceCompletionsInputMessagesTemplateTemplateEvalItem(BaseModel): - content: DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" - - role: Literal["user", "assistant", "system", "developer"] - """The role of the message input. - - One of `user`, `assistant`, `system`, or `developer`. - """ - - type: Optional[Literal["message"]] = None - """The type of the message input. Always `message`.""" - - -DataSourceCompletionsInputMessagesTemplateTemplate: TypeAlias = Union[ - DataSourceCompletionsInputMessagesTemplateTemplateChatMessage, - DataSourceCompletionsInputMessagesTemplateTemplateEvalItem, -] - - -class DataSourceCompletionsInputMessagesTemplate(BaseModel): - template: List[DataSourceCompletionsInputMessagesTemplateTemplate] - """A list of chat messages forming the prompt or context. - - May include variable references to the "item" namespace, ie {{item.name}}. - """ - - type: Literal["template"] - """The type of input messages. Always `template`.""" - - -class DataSourceCompletionsInputMessagesItemReference(BaseModel): - item_reference: str - """A reference to a variable in the "item" namespace. Ie, "item.name" """ - - type: Literal["item_reference"] - """The type of input messages. Always `item_reference`.""" - - -DataSourceCompletionsInputMessages: TypeAlias = Annotated[ - Union[DataSourceCompletionsInputMessagesTemplate, DataSourceCompletionsInputMessagesItemReference], - PropertyInfo(discriminator="type"), -] - - -class DataSourceCompletionsSamplingParams(BaseModel): - max_completion_tokens: Optional[int] = None - """The maximum number of tokens in the generated output.""" - - seed: Optional[int] = None - """A seed value to initialize the randomness, during sampling.""" - - temperature: Optional[float] = None - """A higher temperature increases randomness in the outputs.""" - - top_p: Optional[float] = None - """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" - - -class DataSourceCompletions(BaseModel): - source: DataSourceCompletionsSource - """A EvalResponsesSource object describing a run data source configuration.""" - - type: Literal["completions"] - """The type of run data source. 
Always `completions`.""" - - input_messages: Optional[DataSourceCompletionsInputMessages] = None - - model: Optional[str] = None - """The name of the model to use for generating completions (e.g. "o3-mini").""" - - sampling_params: Optional[DataSourceCompletionsSamplingParams] = None - +__all__ = ["RunCreateResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"] DataSource: TypeAlias = Annotated[ - Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceCompletions], + Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, CreateEvalResponsesRunDataSource], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/evals/run_list_response.py b/src/openai/types/evals/run_list_response.py index 278ceeabed..0df3e5c7ad 100644 --- a/src/openai/types/evals/run_list_response.py +++ b/src/openai/types/evals/run_list_response.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Dict, List, Union, Optional +from typing import List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from pydantic import Field as FieldInfo @@ -9,224 +9,14 @@ from ..._models import BaseModel from .eval_api_error import EvalAPIError from ..shared.metadata import Metadata -from ..shared.reasoning_effort import ReasoningEffort -from ..responses.response_input_text import ResponseInputText from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource +from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource -__all__ = [ - "RunListResponse", - "DataSource", - "DataSourceCompletions", - "DataSourceCompletionsSource", - "DataSourceCompletionsSourceFileContent", - "DataSourceCompletionsSourceFileContentContent", - "DataSourceCompletionsSourceFileID", - "DataSourceCompletionsSourceResponses", - "DataSourceCompletionsInputMessages", - "DataSourceCompletionsInputMessagesTemplate", - "DataSourceCompletionsInputMessagesTemplateTemplate", - "DataSourceCompletionsInputMessagesTemplateTemplateChatMessage", - "DataSourceCompletionsInputMessagesTemplateTemplateEvalItem", - "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent", - "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText", - "DataSourceCompletionsInputMessagesItemReference", - "DataSourceCompletionsSamplingParams", - "PerModelUsage", - "PerTestingCriteriaResult", - "ResultCounts", -] - - -class DataSourceCompletionsSourceFileContentContent(BaseModel): - item: Dict[str, object] - - sample: Optional[Dict[str, object]] = None - - -class DataSourceCompletionsSourceFileContent(BaseModel): - content: List[DataSourceCompletionsSourceFileContentContent] - """The content of the jsonl file.""" - - type: Literal["file_content"] - """The type of jsonl source. Always `file_content`.""" - - -class DataSourceCompletionsSourceFileID(BaseModel): - id: str - """The identifier of the file.""" - - type: Literal["file_id"] - """The type of jsonl source. Always `file_id`.""" - - -class DataSourceCompletionsSourceResponses(BaseModel): - type: Literal["responses"] - """The type of run data source. Always `responses`.""" - - allow_parallel_tool_calls: Optional[bool] = None - """Whether to allow parallel tool calls. - - This is a query parameter used to select responses. 
- """ - - created_after: Optional[int] = None - """Only include items created after this timestamp (inclusive). - - This is a query parameter used to select responses. - """ - - created_before: Optional[int] = None - """Only include items created before this timestamp (inclusive). - - This is a query parameter used to select responses. - """ - - has_tool_calls: Optional[bool] = None - """Whether the response has tool calls. - - This is a query parameter used to select responses. - """ - - instructions_search: Optional[str] = None - """Optional search string for instructions. - - This is a query parameter used to select responses. - """ - - metadata: Optional[object] = None - """Metadata filter for the responses. - - This is a query parameter used to select responses. - """ - - model: Optional[str] = None - """The name of the model to find responses for. - - This is a query parameter used to select responses. - """ - - reasoning_effort: Optional[ReasoningEffort] = None - """Optional reasoning effort parameter. - - This is a query parameter used to select responses. - """ - - temperature: Optional[float] = None - """Sampling temperature. This is a query parameter used to select responses.""" - - top_p: Optional[float] = None - """Nucleus sampling parameter. This is a query parameter used to select responses.""" - - users: Optional[List[str]] = None - """List of user identifiers. This is a query parameter used to select responses.""" - - -DataSourceCompletionsSource: TypeAlias = Annotated[ - Union[ - DataSourceCompletionsSourceFileContent, DataSourceCompletionsSourceFileID, DataSourceCompletionsSourceResponses - ], - PropertyInfo(discriminator="type"), -] - - -class DataSourceCompletionsInputMessagesTemplateTemplateChatMessage(BaseModel): - content: str - """The content of the message.""" - - role: str - """The role of the message (e.g. "system", "assistant", "user").""" - - -class DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): - text: str - """The text output from the model.""" - - type: Literal["output_text"] - """The type of the output text. Always `output_text`.""" - - -DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText -] - - -class DataSourceCompletionsInputMessagesTemplateTemplateEvalItem(BaseModel): - content: DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" - - role: Literal["user", "assistant", "system", "developer"] - """The role of the message input. - - One of `user`, `assistant`, `system`, or `developer`. - """ - - type: Optional[Literal["message"]] = None - """The type of the message input. Always `message`.""" - - -DataSourceCompletionsInputMessagesTemplateTemplate: TypeAlias = Union[ - DataSourceCompletionsInputMessagesTemplateTemplateChatMessage, - DataSourceCompletionsInputMessagesTemplateTemplateEvalItem, -] - - -class DataSourceCompletionsInputMessagesTemplate(BaseModel): - template: List[DataSourceCompletionsInputMessagesTemplateTemplate] - """A list of chat messages forming the prompt or context. - - May include variable references to the "item" namespace, ie {{item.name}}. - """ - - type: Literal["template"] - """The type of input messages. Always `template`.""" - - -class DataSourceCompletionsInputMessagesItemReference(BaseModel): - item_reference: str - """A reference to a variable in the "item" namespace. 
Ie, "item.name" """ - - type: Literal["item_reference"] - """The type of input messages. Always `item_reference`.""" - - -DataSourceCompletionsInputMessages: TypeAlias = Annotated[ - Union[DataSourceCompletionsInputMessagesTemplate, DataSourceCompletionsInputMessagesItemReference], - PropertyInfo(discriminator="type"), -] - - -class DataSourceCompletionsSamplingParams(BaseModel): - max_completion_tokens: Optional[int] = None - """The maximum number of tokens in the generated output.""" - - seed: Optional[int] = None - """A seed value to initialize the randomness, during sampling.""" - - temperature: Optional[float] = None - """A higher temperature increases randomness in the outputs.""" - - top_p: Optional[float] = None - """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" - - -class DataSourceCompletions(BaseModel): - source: DataSourceCompletionsSource - """A EvalResponsesSource object describing a run data source configuration.""" - - type: Literal["completions"] - """The type of run data source. Always `completions`.""" - - input_messages: Optional[DataSourceCompletionsInputMessages] = None - - model: Optional[str] = None - """The name of the model to use for generating completions (e.g. "o3-mini").""" - - sampling_params: Optional[DataSourceCompletionsSamplingParams] = None - +__all__ = ["RunListResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"] DataSource: TypeAlias = Annotated[ - Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceCompletions], + Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, CreateEvalResponsesRunDataSource], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/evals/run_retrieve_response.py b/src/openai/types/evals/run_retrieve_response.py index e142f31b14..35cdb04efc 100644 --- a/src/openai/types/evals/run_retrieve_response.py +++ b/src/openai/types/evals/run_retrieve_response.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import Dict, List, Union, Optional +from typing import List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from pydantic import Field as FieldInfo @@ -9,224 +9,14 @@ from ..._models import BaseModel from .eval_api_error import EvalAPIError from ..shared.metadata import Metadata -from ..shared.reasoning_effort import ReasoningEffort -from ..responses.response_input_text import ResponseInputText from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource +from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource -__all__ = [ - "RunRetrieveResponse", - "DataSource", - "DataSourceCompletions", - "DataSourceCompletionsSource", - "DataSourceCompletionsSourceFileContent", - "DataSourceCompletionsSourceFileContentContent", - "DataSourceCompletionsSourceFileID", - "DataSourceCompletionsSourceResponses", - "DataSourceCompletionsInputMessages", - "DataSourceCompletionsInputMessagesTemplate", - "DataSourceCompletionsInputMessagesTemplateTemplate", - "DataSourceCompletionsInputMessagesTemplateTemplateChatMessage", - "DataSourceCompletionsInputMessagesTemplateTemplateEvalItem", - "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent", - "DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText", - "DataSourceCompletionsInputMessagesItemReference", - "DataSourceCompletionsSamplingParams", - "PerModelUsage", - "PerTestingCriteriaResult", - "ResultCounts", -] - - -class DataSourceCompletionsSourceFileContentContent(BaseModel): - item: Dict[str, object] - - sample: Optional[Dict[str, object]] = None - - -class DataSourceCompletionsSourceFileContent(BaseModel): - content: List[DataSourceCompletionsSourceFileContentContent] - """The content of the jsonl file.""" - - type: Literal["file_content"] - """The type of jsonl source. Always `file_content`.""" - - -class DataSourceCompletionsSourceFileID(BaseModel): - id: str - """The identifier of the file.""" - - type: Literal["file_id"] - """The type of jsonl source. Always `file_id`.""" - - -class DataSourceCompletionsSourceResponses(BaseModel): - type: Literal["responses"] - """The type of run data source. Always `responses`.""" - - allow_parallel_tool_calls: Optional[bool] = None - """Whether to allow parallel tool calls. - - This is a query parameter used to select responses. - """ - - created_after: Optional[int] = None - """Only include items created after this timestamp (inclusive). - - This is a query parameter used to select responses. - """ - - created_before: Optional[int] = None - """Only include items created before this timestamp (inclusive). - - This is a query parameter used to select responses. - """ - - has_tool_calls: Optional[bool] = None - """Whether the response has tool calls. - - This is a query parameter used to select responses. - """ - - instructions_search: Optional[str] = None - """Optional search string for instructions. - - This is a query parameter used to select responses. - """ - - metadata: Optional[object] = None - """Metadata filter for the responses. - - This is a query parameter used to select responses. - """ - - model: Optional[str] = None - """The name of the model to find responses for. - - This is a query parameter used to select responses. - """ - - reasoning_effort: Optional[ReasoningEffort] = None - """Optional reasoning effort parameter. - - This is a query parameter used to select responses. 
- """ - - temperature: Optional[float] = None - """Sampling temperature. This is a query parameter used to select responses.""" - - top_p: Optional[float] = None - """Nucleus sampling parameter. This is a query parameter used to select responses.""" - - users: Optional[List[str]] = None - """List of user identifiers. This is a query parameter used to select responses.""" - - -DataSourceCompletionsSource: TypeAlias = Annotated[ - Union[ - DataSourceCompletionsSourceFileContent, DataSourceCompletionsSourceFileID, DataSourceCompletionsSourceResponses - ], - PropertyInfo(discriminator="type"), -] - - -class DataSourceCompletionsInputMessagesTemplateTemplateChatMessage(BaseModel): - content: str - """The content of the message.""" - - role: str - """The role of the message (e.g. "system", "assistant", "user").""" - - -class DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): - text: str - """The text output from the model.""" - - type: Literal["output_text"] - """The type of the output text. Always `output_text`.""" - - -DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContentOutputText -] - - -class DataSourceCompletionsInputMessagesTemplateTemplateEvalItem(BaseModel): - content: DataSourceCompletionsInputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" - - role: Literal["user", "assistant", "system", "developer"] - """The role of the message input. - - One of `user`, `assistant`, `system`, or `developer`. - """ - - type: Optional[Literal["message"]] = None - """The type of the message input. Always `message`.""" - - -DataSourceCompletionsInputMessagesTemplateTemplate: TypeAlias = Union[ - DataSourceCompletionsInputMessagesTemplateTemplateChatMessage, - DataSourceCompletionsInputMessagesTemplateTemplateEvalItem, -] - - -class DataSourceCompletionsInputMessagesTemplate(BaseModel): - template: List[DataSourceCompletionsInputMessagesTemplateTemplate] - """A list of chat messages forming the prompt or context. - - May include variable references to the "item" namespace, ie {{item.name}}. - """ - - type: Literal["template"] - """The type of input messages. Always `template`.""" - - -class DataSourceCompletionsInputMessagesItemReference(BaseModel): - item_reference: str - """A reference to a variable in the "item" namespace. Ie, "item.name" """ - - type: Literal["item_reference"] - """The type of input messages. Always `item_reference`.""" - - -DataSourceCompletionsInputMessages: TypeAlias = Annotated[ - Union[DataSourceCompletionsInputMessagesTemplate, DataSourceCompletionsInputMessagesItemReference], - PropertyInfo(discriminator="type"), -] - - -class DataSourceCompletionsSamplingParams(BaseModel): - max_completion_tokens: Optional[int] = None - """The maximum number of tokens in the generated output.""" - - seed: Optional[int] = None - """A seed value to initialize the randomness, during sampling.""" - - temperature: Optional[float] = None - """A higher temperature increases randomness in the outputs.""" - - top_p: Optional[float] = None - """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" - - -class DataSourceCompletions(BaseModel): - source: DataSourceCompletionsSource - """A EvalResponsesSource object describing a run data source configuration.""" - - type: Literal["completions"] - """The type of run data source. 
Always `completions`.""" - - input_messages: Optional[DataSourceCompletionsInputMessages] = None - - model: Optional[str] = None - """The name of the model to use for generating completions (e.g. "o3-mini").""" - - sampling_params: Optional[DataSourceCompletionsSamplingParams] = None - +__all__ = ["RunRetrieveResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"] DataSource: TypeAlias = Annotated[ - Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceCompletions], + Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, CreateEvalResponsesRunDataSource], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/fine_tuning/fine_tuning_job.py b/src/openai/types/fine_tuning/fine_tuning_job.py index f626fbba64..b6123f8ba6 100644 --- a/src/openai/types/fine_tuning/fine_tuning_job.py +++ b/src/openai/types/fine_tuning/fine_tuning_job.py @@ -28,7 +28,7 @@ class Error(BaseModel): class Hyperparameters(BaseModel): - batch_size: Union[Literal["auto"], int, None] = None + batch_size: Union[Literal["auto"], int, Optional[object], None] = None """Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but diff --git a/tests/api_resources/audio/test_transcriptions.py b/tests/api_resources/audio/test_transcriptions.py index 19215e11df..753acdecf6 100644 --- a/tests/api_resources/audio/test_transcriptions.py +++ b/tests/api_resources/audio/test_transcriptions.py @@ -30,6 +30,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: transcription = client.audio.transcriptions.create( file=b"raw file contents", model="gpt-4o-transcribe", + chunking_strategy="auto", include=["logprobs"], language="language", prompt="prompt", @@ -81,6 +82,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: file=b"raw file contents", model="gpt-4o-transcribe", stream=True, + chunking_strategy="auto", include=["logprobs"], language="language", prompt="prompt", @@ -134,6 +136,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn transcription = await async_client.audio.transcriptions.create( file=b"raw file contents", model="gpt-4o-transcribe", + chunking_strategy="auto", include=["logprobs"], language="language", prompt="prompt", @@ -185,6 +188,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn file=b"raw file contents", model="gpt-4o-transcribe", stream=True, + chunking_strategy="auto", include=["logprobs"], language="language", prompt="prompt", From 25245e5e3d0713abfb65b760aee1f12bc61deb41 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Thu, 15 May 2025 23:47:25 +0000 Subject: [PATCH 4/8] feat(api): manual updates --- .stats.yml | 2 +- api.md | 5 ++ .../resources/beta/threads/runs/runs.py | 17 ++--- src/openai/resources/beta/threads/threads.py | 17 ++--- .../resources/vector_stores/vector_stores.py | 9 +-- src/openai/types/__init__.py | 3 + src/openai/types/beta/__init__.py | 2 + .../beta/thread_create_and_run_params.py | 21 +----- src/openai/types/beta/threads/run.py | 30 +-------- .../types/beta/threads/run_create_params.py | 21 +----- src/openai/types/beta/truncation_object.py | 25 +++++++ .../types/beta/truncation_object_param.py | 25 +++++++ src/openai/types/eval_create_params.py | 36 +--------- src/openai/types/evals/__init__.py | 4 ++ ...create_eval_completions_run_data_source.py | 67 
++----------------- ..._eval_completions_run_data_source_param.py | 66 ++---------------- .../create_eval_jsonl_run_data_source.py | 33 ++------- ...create_eval_jsonl_run_data_source_param.py | 36 ++-------- .../create_eval_responses_run_data_source.py | 67 ++----------------- ...te_eval_responses_run_data_source_param.py | 67 ++----------------- .../evals/eval_jsonl_file_content_source.py | 22 ++++++ .../eval_jsonl_file_content_source_param.py | 22 ++++++ .../types/evals/eval_jsonl_file_id_source.py | 15 +++++ .../evals/eval_jsonl_file_id_source_param.py | 15 +++++ .../types/graders/label_model_grader.py | 35 ++-------- .../types/graders/label_model_grader_param.py | 35 ++-------- .../types/graders/score_model_grader.py | 35 ++-------- .../types/graders/score_model_grader_param.py | 35 ++-------- src/openai/types/shared/__init__.py | 1 + src/openai/types/shared/eval_item.py | 34 ++++++++++ src/openai/types/shared_params/__init__.py | 1 + src/openai/types/shared_params/eval_item.py | 35 ++++++++++ src/openai/types/vector_store.py | 16 +---- .../types/vector_store_create_params.py | 18 ++--- .../types/vector_store_expiration_after.py | 18 +++++ .../vector_store_expiration_after_param.py | 18 +++++ .../types/vector_store_update_params.py | 18 ++--- 37 files changed, 346 insertions(+), 580 deletions(-) create mode 100644 src/openai/types/beta/truncation_object.py create mode 100644 src/openai/types/beta/truncation_object_param.py create mode 100644 src/openai/types/evals/eval_jsonl_file_content_source.py create mode 100644 src/openai/types/evals/eval_jsonl_file_content_source_param.py create mode 100644 src/openai/types/evals/eval_jsonl_file_id_source.py create mode 100644 src/openai/types/evals/eval_jsonl_file_id_source_param.py create mode 100644 src/openai/types/shared/eval_item.py create mode 100644 src/openai/types/shared_params/eval_item.py create mode 100644 src/openai/types/vector_store_expiration_after.py create mode 100644 src/openai/types/vector_store_expiration_after_param.py diff --git a/.stats.yml b/.stats.yml index 11ba2b0101..202b915dc8 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 101 openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-161ca7f1cfd7b33c1fc07d0ce25dfe4be5a7271c394f4cb526b7fb21b0729900.yml openapi_spec_hash: 602e14add4bee018c6774e320ce309b8 -config_hash: 7da27f7260075e8813ddcea542fba1bf +config_hash: bdacc55eb995c15255ec82130eb8c3bb diff --git a/api.md b/api.md index db505b20d1..869b7d5042 100644 --- a/api.md +++ b/api.md @@ -7,6 +7,7 @@ from openai.types import ( ComparisonFilter, CompoundFilter, ErrorObject, + EvalItem, FunctionDefinition, FunctionParameters, Metadata, @@ -343,6 +344,7 @@ from openai.types import ( StaticFileChunkingStrategyObjectParam, VectorStore, VectorStoreDeleted, + VectorStoreExpirationAfter, VectorStoreSearchResponse, ) ``` @@ -519,6 +521,7 @@ from openai.types.beta import ( AssistantToolChoiceOption, Thread, ThreadDeleted, + TruncationObject, ) ``` @@ -815,6 +818,8 @@ from openai.types.evals import ( CreateEvalJSONLRunDataSource, CreateEvalResponsesRunDataSource, EvalAPIError, + EvalJSONLFileContentSource, + EvalJSONLFileIDSource, RunCreateResponse, RunRetrieveResponse, RunListResponse, diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index 4d19010fea..f59fda8d5f 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -51,6 +51,7 @@ from 
.....types.shared.reasoning_effort import ReasoningEffort from .....types.beta.assistant_tool_param import AssistantToolParam from .....types.beta.assistant_stream_event import AssistantStreamEvent +from .....types.beta.truncation_object_param import TruncationObjectParam from .....types.beta.threads.runs.run_step_include import RunStepInclude from .....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam from .....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -104,7 +105,7 @@ def create( tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -254,7 +255,7 @@ def create( tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -404,7 +405,7 @@ def create( tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -554,7 +555,7 @@ def create( tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -1515,7 +1516,7 @@ async def create( tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1665,7 +1666,7 @@ async def create( tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1815,7 +1816,7 @@ async def create( tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1965,7 +1966,7 @@ async def create( tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index 13d8cb6411..ec5a8ea2cf 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -52,6 +52,7 @@ from ....types.shared_params.metadata import Metadata from ....types.beta.assistant_tool_param import AssistantToolParam from ....types.beta.assistant_stream_event import AssistantStreamEvent +from ....types.beta.truncation_object_param import TruncationObjectParam from ....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam from ....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -285,7 +286,7 @@ def create_and_run( tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -418,7 +419,7 @@ def create_and_run( tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -551,7 +552,7 @@ def create_and_run( tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -684,7 +685,7 @@ def create_and_run( tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -1134,7 +1135,7 @@ async def create_and_run( tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1267,7 +1268,7 @@ async def create_and_run( tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1400,7 +1401,7 @@ async def create_and_run( tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1533,7 +1534,7 @@ async def create_and_run( tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, diff --git a/src/openai/resources/vector_stores/vector_stores.py b/src/openai/resources/vector_stores/vector_stores.py index 9fc17b183b..7f353af080 100644 --- a/src/openai/resources/vector_stores/vector_stores.py +++ b/src/openai/resources/vector_stores/vector_stores.py @@ -43,6 +43,7 @@ from ...types.shared_params.metadata import Metadata from ...types.file_chunking_strategy_param import FileChunkingStrategyParam from ...types.vector_store_search_response import VectorStoreSearchResponse +from ...types.vector_store_expiration_after_param import VectorStoreExpirationAfterParam __all__ = ["VectorStores", "AsyncVectorStores"] @@ -79,7 +80,7 @@ def create( self, *, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, - expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, + expires_after: VectorStoreExpirationAfterParam | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, @@ -177,7 +178,7 @@ def update( self, vector_store_id: str, *, - expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN, + expires_after: Optional[VectorStoreExpirationAfterParam] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -424,7 +425,7 @@ async def create( self, *, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, - expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, + expires_after: VectorStoreExpirationAfterParam | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, @@ -522,7 +523,7 @@ async def update( self, vector_store_id: str, *, - expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN, + expires_after: Optional[VectorStoreExpirationAfterParam] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
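For context, a minimal usage sketch of the two shared param shapes the resource changes above switch to — `TruncationObjectParam` for `truncation_strategy` and `VectorStoreExpirationAfterParam` for `expires_after`. This is an illustration only, not part of the patch; it assumes a configured client, and the IDs are placeholders:

from openai import OpenAI

client = OpenAI()

# truncation_strategy takes the shared TruncationObjectParam shape:
# {"type": "auto" | "last_messages", "last_messages": int | None}
run = client.beta.threads.runs.create(
    thread_id="thread_abc123",   # placeholder ID
    assistant_id="asst_abc123",  # placeholder ID
    truncation_strategy={"type": "last_messages", "last_messages": 10},
)

# expires_after takes the shared VectorStoreExpirationAfterParam shape:
# {"anchor": "last_active_at", "days": int}
store = client.vector_stores.create(
    name="example-store",
    expires_after={"anchor": "last_active_at", "days": 7},
)

Because both params are TypedDicts, callers pass plain dicts as shown; the rename only centralizes the previously duplicated inline `TruncationStrategy` / `ExpiresAfter` definitions.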
diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index 9f40033354..de6665155f 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -6,6 +6,7 @@ from .image import Image as Image from .model import Model as Model from .shared import ( + EvalItem as EvalItem, Metadata as Metadata, AllModels as AllModels, ChatModel as ChatModel, @@ -76,12 +77,14 @@ from .websocket_connection_options import WebsocketConnectionOptions as WebsocketConnectionOptions from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy +from .vector_store_expiration_after import VectorStoreExpirationAfter as VectorStoreExpirationAfter from .eval_custom_data_source_config import EvalCustomDataSourceConfig as EvalCustomDataSourceConfig from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam +from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam as VectorStoreExpirationAfterParam from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject from .eval_stored_completions_data_source_config import ( EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, diff --git a/src/openai/types/beta/__init__.py b/src/openai/types/beta/__init__.py index 5ba3eadf3c..bfcaed7532 100644 --- a/src/openai/types/beta/__init__.py +++ b/src/openai/types/beta/__init__.py @@ -9,6 +9,7 @@ from .thread_deleted import ThreadDeleted as ThreadDeleted from .file_search_tool import FileSearchTool as FileSearchTool from .assistant_deleted import AssistantDeleted as AssistantDeleted +from .truncation_object import TruncationObject as TruncationObject from .function_tool_param import FunctionToolParam as FunctionToolParam from .assistant_tool_param import AssistantToolParam as AssistantToolParam from .thread_create_params import ThreadCreateParams as ThreadCreateParams @@ -20,6 +21,7 @@ from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam from .assistant_create_params import AssistantCreateParams as AssistantCreateParams from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams +from .truncation_object_param import TruncationObjectParam as TruncationObjectParam from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam from .code_interpreter_tool_param import CodeInterpreterToolParam as CodeInterpreterToolParam from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index d813710579..7ba71b0ba3 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -8,6 +8,7 @@ from ..shared.chat_model import ChatModel from .assistant_tool_param import 
AssistantToolParam from ..shared_params.metadata import Metadata +from .truncation_object_param import TruncationObjectParam from .code_interpreter_tool_param import CodeInterpreterToolParam from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam from .threads.message_content_part_param import MessageContentPartParam @@ -31,7 +32,6 @@ "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", - "TruncationStrategy", "ThreadCreateAndRunParamsNonStreaming", "ThreadCreateAndRunParamsStreaming", ] @@ -166,7 +166,7 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): We generally recommend altering this or temperature but not both. """ - truncation_strategy: Optional[TruncationStrategy] + truncation_strategy: Optional[TruncationObjectParam] """Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. @@ -358,23 +358,6 @@ class ToolResources(TypedDict, total=False): file_search: ToolResourcesFileSearch -class TruncationStrategy(TypedDict, total=False): - type: Required[Literal["auto", "last_messages"]] - """The truncation strategy to use for the thread. - - The default is `auto`. If set to `last_messages`, the thread will be truncated - to the n most recent messages in the thread. When set to `auto`, messages in the - middle of the thread will be dropped to fit the context length of the model, - `max_prompt_tokens`. - """ - - last_messages: Optional[int] - """ - The number of most recent messages from the thread when constructing the context - for the run. - """ - - class ThreadCreateAndRunParamsNonStreaming(ThreadCreateAndRunParamsBase, total=False): stream: Optional[Literal[False]] """ diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py index da9418d6f9..e5a7808417 100644 --- a/src/openai/types/beta/threads/run.py +++ b/src/openai/types/beta/threads/run.py @@ -7,19 +7,12 @@ from .run_status import RunStatus from ..assistant_tool import AssistantTool from ...shared.metadata import Metadata +from ..truncation_object import TruncationObject from ..assistant_tool_choice_option import AssistantToolChoiceOption from ..assistant_response_format_option import AssistantResponseFormatOption from .required_action_function_tool_call import RequiredActionFunctionToolCall -__all__ = [ - "Run", - "IncompleteDetails", - "LastError", - "RequiredAction", - "RequiredActionSubmitToolOutputs", - "TruncationStrategy", - "Usage", -] +__all__ = ["Run", "IncompleteDetails", "LastError", "RequiredAction", "RequiredActionSubmitToolOutputs", "Usage"] class IncompleteDetails(BaseModel): @@ -52,23 +45,6 @@ class RequiredAction(BaseModel): """For now, this is always `submit_tool_outputs`.""" -class TruncationStrategy(BaseModel): - type: Literal["auto", "last_messages"] - """The truncation strategy to use for the thread. - - The default is `auto`. If set to `last_messages`, the thread will be truncated - to the n most recent messages in the thread. When set to `auto`, messages in the - middle of the thread will be dropped to fit the context length of the model, - `max_prompt_tokens`. - """ - - last_messages: Optional[int] = None - """ - The number of most recent messages from the thread when constructing the context - for the run. - """ - - class Usage(BaseModel): completion_tokens: int """Number of completion tokens used over the course of the run.""" @@ -225,7 +201,7 @@ class Run(BaseModel): this run. 
""" - truncation_strategy: Optional[TruncationStrategy] = None + truncation_strategy: Optional[TruncationObject] = None """Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index fc70227862..80656aada4 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -9,6 +9,7 @@ from ..assistant_tool_param import AssistantToolParam from .runs.run_step_include import RunStepInclude from ...shared_params.metadata import Metadata +from ..truncation_object_param import TruncationObjectParam from ...shared.reasoning_effort import ReasoningEffort from .message_content_part_param import MessageContentPartParam from ..code_interpreter_tool_param import CodeInterpreterToolParam @@ -21,7 +22,6 @@ "AdditionalMessageAttachment", "AdditionalMessageAttachmentTool", "AdditionalMessageAttachmentToolFileSearch", - "TruncationStrategy", "RunCreateParamsNonStreaming", "RunCreateParamsStreaming", ] @@ -173,7 +173,7 @@ class RunCreateParamsBase(TypedDict, total=False): We generally recommend altering this or temperature but not both. """ - truncation_strategy: Optional[TruncationStrategy] + truncation_strategy: Optional[TruncationObjectParam] """Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. @@ -223,23 +223,6 @@ class AdditionalMessage(TypedDict, total=False): """ -class TruncationStrategy(TypedDict, total=False): - type: Required[Literal["auto", "last_messages"]] - """The truncation strategy to use for the thread. - - The default is `auto`. If set to `last_messages`, the thread will be truncated - to the n most recent messages in the thread. When set to `auto`, messages in the - middle of the thread will be dropped to fit the context length of the model, - `max_prompt_tokens`. - """ - - last_messages: Optional[int] - """ - The number of most recent messages from the thread when constructing the context - for the run. - """ - - class RunCreateParamsNonStreaming(RunCreateParamsBase, total=False): stream: Optional[Literal[False]] """ diff --git a/src/openai/types/beta/truncation_object.py b/src/openai/types/beta/truncation_object.py new file mode 100644 index 0000000000..7c81b3b5bc --- /dev/null +++ b/src/openai/types/beta/truncation_object.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["TruncationObject"] + + +class TruncationObject(BaseModel): + type: Literal["auto", "last_messages"] + """The truncation strategy to use for the thread. + + The default is `auto`. If set to `last_messages`, the thread will be truncated + to the n most recent messages in the thread. When set to `auto`, messages in the + middle of the thread will be dropped to fit the context length of the model, + `max_prompt_tokens`. + """ + + last_messages: Optional[int] = None + """ + The number of most recent messages from the thread when constructing the context + for the run. 
+ """ diff --git a/src/openai/types/beta/truncation_object_param.py b/src/openai/types/beta/truncation_object_param.py new file mode 100644 index 0000000000..98d942fa09 --- /dev/null +++ b/src/openai/types/beta/truncation_object_param.py @@ -0,0 +1,25 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["TruncationObjectParam"] + + +class TruncationObjectParam(TypedDict, total=False): + type: Required[Literal["auto", "last_messages"]] + """The truncation strategy to use for the thread. + + The default is `auto`. If set to `last_messages`, the thread will be truncated + to the n most recent messages in the thread. When set to `auto`, messages in the + middle of the thread will be dropped to fit the context length of the model, + `max_prompt_tokens`. + """ + + last_messages: Optional[int] + """ + The number of most recent messages from the thread when constructing the context + for the run. + """ diff --git a/src/openai/types/eval_create_params.py b/src/openai/types/eval_create_params.py index 8d508a2d8e..95fd0bb8d8 100644 --- a/src/openai/types/eval_create_params.py +++ b/src/openai/types/eval_create_params.py @@ -6,10 +6,10 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict from .shared_params.metadata import Metadata +from .shared_params.eval_item import EvalItem from .graders.python_grader_param import PythonGraderParam from .graders.score_model_grader_param import ScoreModelGraderParam from .graders.string_check_grader_param import StringCheckGraderParam -from .responses.response_input_text_param import ResponseInputTextParam from .graders.text_similarity_grader_param import TextSimilarityGraderParam __all__ = [ @@ -22,9 +22,6 @@ "TestingCriterionLabelModel", "TestingCriterionLabelModelInput", "TestingCriterionLabelModelInputSimpleInputMessage", - "TestingCriterionLabelModelInputEvalItem", - "TestingCriterionLabelModelInputEvalItemContent", - "TestingCriterionLabelModelInputEvalItemContentOutputText", "TestingCriterionTextSimilarity", "TestingCriterionPython", "TestingCriterionScoreModel", @@ -93,36 +90,7 @@ class TestingCriterionLabelModelInputSimpleInputMessage(TypedDict, total=False): """The role of the message (e.g. "system", "assistant", "user").""" -class TestingCriterionLabelModelInputEvalItemContentOutputText(TypedDict, total=False): - text: Required[str] - """The text output from the model.""" - - type: Required[Literal["output_text"]] - """The type of the output text. Always `output_text`.""" - - -TestingCriterionLabelModelInputEvalItemContent: TypeAlias = Union[ - str, ResponseInputTextParam, TestingCriterionLabelModelInputEvalItemContentOutputText -] - - -class TestingCriterionLabelModelInputEvalItem(TypedDict, total=False): - content: Required[TestingCriterionLabelModelInputEvalItemContent] - """Text inputs to the model - can contain template strings.""" - - role: Required[Literal["user", "assistant", "system", "developer"]] - """The role of the message input. - - One of `user`, `assistant`, `system`, or `developer`. - """ - - type: Literal["message"] - """The type of the message input. 
Always `message`.""" - - -TestingCriterionLabelModelInput: TypeAlias = Union[ - TestingCriterionLabelModelInputSimpleInputMessage, TestingCriterionLabelModelInputEvalItem -] +TestingCriterionLabelModelInput: TypeAlias = Union[TestingCriterionLabelModelInputSimpleInputMessage, EvalItem] class TestingCriterionLabelModel(TypedDict, total=False): diff --git a/src/openai/types/evals/__init__.py b/src/openai/types/evals/__init__.py index 9d26c7d915..7841a40382 100644 --- a/src/openai/types/evals/__init__.py +++ b/src/openai/types/evals/__init__.py @@ -10,7 +10,11 @@ from .run_create_response import RunCreateResponse as RunCreateResponse from .run_delete_response import RunDeleteResponse as RunDeleteResponse from .run_retrieve_response import RunRetrieveResponse as RunRetrieveResponse +from .eval_jsonl_file_id_source import EvalJSONLFileIDSource as EvalJSONLFileIDSource +from .eval_jsonl_file_content_source import EvalJSONLFileContentSource as EvalJSONLFileContentSource +from .eval_jsonl_file_id_source_param import EvalJSONLFileIDSourceParam as EvalJSONLFileIDSourceParam from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource +from .eval_jsonl_file_content_source_param import EvalJSONLFileContentSourceParam as EvalJSONLFileContentSourceParam from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource from .create_eval_completions_run_data_source import ( CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, diff --git a/src/openai/types/evals/create_eval_completions_run_data_source.py b/src/openai/types/evals/create_eval_completions_run_data_source.py index 29c687b542..439fcc5d7b 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source.py @@ -1,54 +1,28 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Dict, List, Union, Optional +from typing import List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from ..._utils import PropertyInfo from ..._models import BaseModel from ..shared.metadata import Metadata +from ..shared.eval_item import EvalItem +from .eval_jsonl_file_id_source import EvalJSONLFileIDSource from ..responses.easy_input_message import EasyInputMessage -from ..responses.response_input_text import ResponseInputText +from .eval_jsonl_file_content_source import EvalJSONLFileContentSource __all__ = [ "CreateEvalCompletionsRunDataSource", "Source", - "SourceFileContent", - "SourceFileContentContent", - "SourceFileID", "SourceStoredCompletions", "InputMessages", "InputMessagesTemplate", "InputMessagesTemplateTemplate", - "InputMessagesTemplateTemplateMessage", - "InputMessagesTemplateTemplateMessageContent", - "InputMessagesTemplateTemplateMessageContentOutputText", "InputMessagesItemReference", "SamplingParams", ] -class SourceFileContentContent(BaseModel): - item: Dict[str, object] - - sample: Optional[Dict[str, object]] = None - - -class SourceFileContent(BaseModel): - content: List[SourceFileContentContent] - """The content of the jsonl file.""" - - type: Literal["file_content"] - """The type of jsonl source. Always `file_content`.""" - - -class SourceFileID(BaseModel): - id: str - """The identifier of the file.""" - - type: Literal["file_id"] - """The type of jsonl source. 
Always `file_id`.""" - - class SourceStoredCompletions(BaseModel): type: Literal["stored_completions"] """The type of source. Always `stored_completions`.""" @@ -77,39 +51,12 @@ class SourceStoredCompletions(BaseModel): Source: TypeAlias = Annotated[ - Union[SourceFileContent, SourceFileID, SourceStoredCompletions], PropertyInfo(discriminator="type") -] - - -class InputMessagesTemplateTemplateMessageContentOutputText(BaseModel): - text: str - """The text output from the model.""" - - type: Literal["output_text"] - """The type of the output text. Always `output_text`.""" - - -InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[ - str, ResponseInputText, InputMessagesTemplateTemplateMessageContentOutputText + Union[EvalJSONLFileContentSource, EvalJSONLFileIDSource, SourceStoredCompletions], + PropertyInfo(discriminator="type"), ] - -class InputMessagesTemplateTemplateMessage(BaseModel): - content: InputMessagesTemplateTemplateMessageContent - """Text inputs to the model - can contain template strings.""" - - role: Literal["user", "assistant", "system", "developer"] - """The role of the message input. - - One of `user`, `assistant`, `system`, or `developer`. - """ - - type: Optional[Literal["message"]] = None - """The type of the message input. Always `message`.""" - - InputMessagesTemplateTemplate: TypeAlias = Annotated[ - Union[EasyInputMessage, InputMessagesTemplateTemplateMessage], PropertyInfo(discriminator="type") + Union[EasyInputMessage, EvalItem], PropertyInfo(discriminator="type") ] diff --git a/src/openai/types/evals/create_eval_completions_run_data_source_param.py b/src/openai/types/evals/create_eval_completions_run_data_source_param.py index c53064ee27..e94443d953 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source_param.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source_param.py @@ -2,53 +2,27 @@ from __future__ import annotations -from typing import Dict, Union, Iterable, Optional +from typing import Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..shared_params.metadata import Metadata +from ..shared_params.eval_item import EvalItem +from .eval_jsonl_file_id_source_param import EvalJSONLFileIDSourceParam from ..responses.easy_input_message_param import EasyInputMessageParam -from ..responses.response_input_text_param import ResponseInputTextParam +from .eval_jsonl_file_content_source_param import EvalJSONLFileContentSourceParam __all__ = [ "CreateEvalCompletionsRunDataSourceParam", "Source", - "SourceFileContent", - "SourceFileContentContent", - "SourceFileID", "SourceStoredCompletions", "InputMessages", "InputMessagesTemplate", "InputMessagesTemplateTemplate", - "InputMessagesTemplateTemplateMessage", - "InputMessagesTemplateTemplateMessageContent", - "InputMessagesTemplateTemplateMessageContentOutputText", "InputMessagesItemReference", "SamplingParams", ] -class SourceFileContentContent(TypedDict, total=False): - item: Required[Dict[str, object]] - - sample: Dict[str, object] - - -class SourceFileContent(TypedDict, total=False): - content: Required[Iterable[SourceFileContentContent]] - """The content of the jsonl file.""" - - type: Required[Literal["file_content"]] - """The type of jsonl source. Always `file_content`.""" - - -class SourceFileID(TypedDict, total=False): - id: Required[str] - """The identifier of the file.""" - - type: Required[Literal["file_id"]] - """The type of jsonl source. 
Always `file_id`.""" - - class SourceStoredCompletions(TypedDict, total=False): type: Required[Literal["stored_completions"]] """The type of source. Always `stored_completions`.""" @@ -76,37 +50,9 @@ class SourceStoredCompletions(TypedDict, total=False): """An optional model to filter by (e.g., 'gpt-4o').""" -Source: TypeAlias = Union[SourceFileContent, SourceFileID, SourceStoredCompletions] - - -class InputMessagesTemplateTemplateMessageContentOutputText(TypedDict, total=False): - text: Required[str] - """The text output from the model.""" - - type: Required[Literal["output_text"]] - """The type of the output text. Always `output_text`.""" - - -InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[ - str, ResponseInputTextParam, InputMessagesTemplateTemplateMessageContentOutputText -] - - -class InputMessagesTemplateTemplateMessage(TypedDict, total=False): - content: Required[InputMessagesTemplateTemplateMessageContent] - """Text inputs to the model - can contain template strings.""" - - role: Required[Literal["user", "assistant", "system", "developer"]] - """The role of the message input. - - One of `user`, `assistant`, `system`, or `developer`. - """ - - type: Literal["message"] - """The type of the message input. Always `message`.""" - +Source: TypeAlias = Union[EvalJSONLFileContentSourceParam, EvalJSONLFileIDSourceParam, SourceStoredCompletions] -InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, InputMessagesTemplateTemplateMessage] +InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, EvalItem] class InputMessagesTemplate(TypedDict, total=False): diff --git a/src/openai/types/evals/create_eval_jsonl_run_data_source.py b/src/openai/types/evals/create_eval_jsonl_run_data_source.py index d2be56243b..03c6550744 100644 --- a/src/openai/types/evals/create_eval_jsonl_run_data_source.py +++ b/src/openai/types/evals/create_eval_jsonl_run_data_source.py @@ -1,37 +1,18 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Dict, List, Union, Optional +from typing import Union from typing_extensions import Literal, Annotated, TypeAlias from ..._utils import PropertyInfo from ..._models import BaseModel +from .eval_jsonl_file_id_source import EvalJSONLFileIDSource +from .eval_jsonl_file_content_source import EvalJSONLFileContentSource -__all__ = ["CreateEvalJSONLRunDataSource", "Source", "SourceFileContent", "SourceFileContentContent", "SourceFileID"] +__all__ = ["CreateEvalJSONLRunDataSource", "Source"] - -class SourceFileContentContent(BaseModel): - item: Dict[str, object] - - sample: Optional[Dict[str, object]] = None - - -class SourceFileContent(BaseModel): - content: List[SourceFileContentContent] - """The content of the jsonl file.""" - - type: Literal["file_content"] - """The type of jsonl source. Always `file_content`.""" - - -class SourceFileID(BaseModel): - id: str - """The identifier of the file.""" - - type: Literal["file_id"] - """The type of jsonl source. 
Always `file_id`.""" - - -Source: TypeAlias = Annotated[Union[SourceFileContent, SourceFileID], PropertyInfo(discriminator="type")] +Source: TypeAlias = Annotated[ + Union[EvalJSONLFileContentSource, EvalJSONLFileIDSource], PropertyInfo(discriminator="type") +] class CreateEvalJSONLRunDataSource(BaseModel): diff --git a/src/openai/types/evals/create_eval_jsonl_run_data_source_param.py b/src/openai/types/evals/create_eval_jsonl_run_data_source_param.py index b8ba48a666..cc71925782 100644 --- a/src/openai/types/evals/create_eval_jsonl_run_data_source_param.py +++ b/src/openai/types/evals/create_eval_jsonl_run_data_source_param.py @@ -2,41 +2,15 @@ from __future__ import annotations -from typing import Dict, Union, Iterable +from typing import Union from typing_extensions import Literal, Required, TypeAlias, TypedDict -__all__ = [ - "CreateEvalJSONLRunDataSourceParam", - "Source", - "SourceFileContent", - "SourceFileContentContent", - "SourceFileID", -] +from .eval_jsonl_file_id_source_param import EvalJSONLFileIDSourceParam +from .eval_jsonl_file_content_source_param import EvalJSONLFileContentSourceParam +__all__ = ["CreateEvalJSONLRunDataSourceParam", "Source"] -class SourceFileContentContent(TypedDict, total=False): - item: Required[Dict[str, object]] - - sample: Dict[str, object] - - -class SourceFileContent(TypedDict, total=False): - content: Required[Iterable[SourceFileContentContent]] - """The content of the jsonl file.""" - - type: Required[Literal["file_content"]] - """The type of jsonl source. Always `file_content`.""" - - -class SourceFileID(TypedDict, total=False): - id: Required[str] - """The identifier of the file.""" - - type: Required[Literal["file_id"]] - """The type of jsonl source. Always `file_id`.""" - - -Source: TypeAlias = Union[SourceFileContent, SourceFileID] +Source: TypeAlias = Union[EvalJSONLFileContentSourceParam, EvalJSONLFileIDSourceParam] class CreateEvalJSONLRunDataSourceParam(TypedDict, total=False): diff --git a/src/openai/types/evals/create_eval_responses_run_data_source.py b/src/openai/types/evals/create_eval_responses_run_data_source.py index 481fd0761e..268eab2173 100644 --- a/src/openai/types/evals/create_eval_responses_run_data_source.py +++ b/src/openai/types/evals/create_eval_responses_run_data_source.py @@ -1,54 +1,28 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
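# A short sketch (assumed usage, not part of the generated module): after this
# refactor the JSONL run data source composes the shared file-source types, so
# a payload can still be written as a plain dict. The file ID below is a
# placeholder, and the "jsonl" type literal is assumed from the unchanged
# class body.
from openai.types.evals.create_eval_jsonl_run_data_source_param import (
    CreateEvalJSONLRunDataSourceParam,
)

data_source: CreateEvalJSONLRunDataSourceParam = {
    "type": "jsonl",
    "source": {"type": "file_id", "id": "file-abc123"},  # an EvalJSONLFileIDSourceParam
}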
-from typing import Dict, List, Union, Optional +from typing import List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from ..._utils import PropertyInfo from ..._models import BaseModel +from ..shared.eval_item import EvalItem from ..shared.reasoning_effort import ReasoningEffort -from ..responses.response_input_text import ResponseInputText +from .eval_jsonl_file_id_source import EvalJSONLFileIDSource +from .eval_jsonl_file_content_source import EvalJSONLFileContentSource __all__ = [ "CreateEvalResponsesRunDataSource", "Source", - "SourceFileContent", - "SourceFileContentContent", - "SourceFileID", "SourceResponses", "InputMessages", "InputMessagesTemplate", "InputMessagesTemplateTemplate", "InputMessagesTemplateTemplateChatMessage", - "InputMessagesTemplateTemplateEvalItem", - "InputMessagesTemplateTemplateEvalItemContent", - "InputMessagesTemplateTemplateEvalItemContentOutputText", "InputMessagesItemReference", "SamplingParams", ] -class SourceFileContentContent(BaseModel): - item: Dict[str, object] - - sample: Optional[Dict[str, object]] = None - - -class SourceFileContent(BaseModel): - content: List[SourceFileContentContent] - """The content of the jsonl file.""" - - type: Literal["file_content"] - """The type of jsonl source. Always `file_content`.""" - - -class SourceFileID(BaseModel): - id: str - """The identifier of the file.""" - - type: Literal["file_id"] - """The type of jsonl source. Always `file_id`.""" - - class SourceResponses(BaseModel): type: Literal["responses"] """The type of run data source. Always `responses`.""" @@ -109,7 +83,7 @@ class SourceResponses(BaseModel): Source: TypeAlias = Annotated[ - Union[SourceFileContent, SourceFileID, SourceResponses], PropertyInfo(discriminator="type") + Union[EvalJSONLFileContentSource, EvalJSONLFileIDSource, SourceResponses], PropertyInfo(discriminator="type") ] @@ -121,36 +95,7 @@ class InputMessagesTemplateTemplateChatMessage(BaseModel): """The role of the message (e.g. "system", "assistant", "user").""" -class InputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): - text: str - """The text output from the model.""" - - type: Literal["output_text"] - """The type of the output text. Always `output_text`.""" - - -InputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputText, InputMessagesTemplateTemplateEvalItemContentOutputText -] - - -class InputMessagesTemplateTemplateEvalItem(BaseModel): - content: InputMessagesTemplateTemplateEvalItemContent - """Text inputs to the model - can contain template strings.""" - - role: Literal["user", "assistant", "system", "developer"] - """The role of the message input. - - One of `user`, `assistant`, `system`, or `developer`. - """ - - type: Optional[Literal["message"]] = None - """The type of the message input. 
Always `message`.""" - - -InputMessagesTemplateTemplate: TypeAlias = Union[ - InputMessagesTemplateTemplateChatMessage, InputMessagesTemplateTemplateEvalItem -] +InputMessagesTemplateTemplate: TypeAlias = Union[InputMessagesTemplateTemplateChatMessage, EvalItem] class InputMessagesTemplate(BaseModel): diff --git a/src/openai/types/evals/create_eval_responses_run_data_source_param.py b/src/openai/types/evals/create_eval_responses_run_data_source_param.py index 9cde20de20..02d45a9e13 100644 --- a/src/openai/types/evals/create_eval_responses_run_data_source_param.py +++ b/src/openai/types/evals/create_eval_responses_run_data_source_param.py @@ -2,53 +2,27 @@ from __future__ import annotations -from typing import Dict, List, Union, Iterable, Optional +from typing import List, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..shared.reasoning_effort import ReasoningEffort -from ..responses.response_input_text_param import ResponseInputTextParam +from ..shared_params.eval_item import EvalItem +from .eval_jsonl_file_id_source_param import EvalJSONLFileIDSourceParam +from .eval_jsonl_file_content_source_param import EvalJSONLFileContentSourceParam __all__ = [ "CreateEvalResponsesRunDataSourceParam", "Source", - "SourceFileContent", - "SourceFileContentContent", - "SourceFileID", "SourceResponses", "InputMessages", "InputMessagesTemplate", "InputMessagesTemplateTemplate", "InputMessagesTemplateTemplateChatMessage", - "InputMessagesTemplateTemplateEvalItem", - "InputMessagesTemplateTemplateEvalItemContent", - "InputMessagesTemplateTemplateEvalItemContentOutputText", "InputMessagesItemReference", "SamplingParams", ] -class SourceFileContentContent(TypedDict, total=False): - item: Required[Dict[str, object]] - - sample: Dict[str, object] - - -class SourceFileContent(TypedDict, total=False): - content: Required[Iterable[SourceFileContentContent]] - """The content of the jsonl file.""" - - type: Required[Literal["file_content"]] - """The type of jsonl source. Always `file_content`.""" - - -class SourceFileID(TypedDict, total=False): - id: Required[str] - """The identifier of the file.""" - - type: Required[Literal["file_id"]] - """The type of jsonl source. Always `file_id`.""" - - class SourceResponses(TypedDict, total=False): type: Required[Literal["responses"]] """The type of run data source. Always `responses`.""" @@ -108,7 +82,7 @@ class SourceResponses(TypedDict, total=False): """List of user identifiers. This is a query parameter used to select responses.""" -Source: TypeAlias = Union[SourceFileContent, SourceFileID, SourceResponses] +Source: TypeAlias = Union[EvalJSONLFileContentSourceParam, EvalJSONLFileIDSourceParam, SourceResponses] class InputMessagesTemplateTemplateChatMessage(TypedDict, total=False): @@ -119,36 +93,7 @@ class InputMessagesTemplateTemplateChatMessage(TypedDict, total=False): """The role of the message (e.g. "system", "assistant", "user").""" -class InputMessagesTemplateTemplateEvalItemContentOutputText(TypedDict, total=False): - text: Required[str] - """The text output from the model.""" - - type: Required[Literal["output_text"]] - """The type of the output text. 
Always `output_text`.""" - - -InputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ - str, ResponseInputTextParam, InputMessagesTemplateTemplateEvalItemContentOutputText -] - - -class InputMessagesTemplateTemplateEvalItem(TypedDict, total=False): - content: Required[InputMessagesTemplateTemplateEvalItemContent] - """Text inputs to the model - can contain template strings.""" - - role: Required[Literal["user", "assistant", "system", "developer"]] - """The role of the message input. - - One of `user`, `assistant`, `system`, or `developer`. - """ - - type: Literal["message"] - """The type of the message input. Always `message`.""" - - -InputMessagesTemplateTemplate: TypeAlias = Union[ - InputMessagesTemplateTemplateChatMessage, InputMessagesTemplateTemplateEvalItem -] +InputMessagesTemplateTemplate: TypeAlias = Union[InputMessagesTemplateTemplateChatMessage, EvalItem] class InputMessagesTemplate(TypedDict, total=False): diff --git a/src/openai/types/evals/eval_jsonl_file_content_source.py b/src/openai/types/evals/eval_jsonl_file_content_source.py new file mode 100644 index 0000000000..b18fe8937b --- /dev/null +++ b/src/openai/types/evals/eval_jsonl_file_content_source.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Dict, List, Optional +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["EvalJSONLFileContentSource", "Content"] + + +class Content(BaseModel): + item: Dict[str, object] + + sample: Optional[Dict[str, object]] = None + + +class EvalJSONLFileContentSource(BaseModel): + content: List[Content] + """The content of the jsonl file.""" + + type: Literal["file_content"] + """The type of jsonl source. Always `file_content`.""" diff --git a/src/openai/types/evals/eval_jsonl_file_content_source_param.py b/src/openai/types/evals/eval_jsonl_file_content_source_param.py new file mode 100644 index 0000000000..a70f688762 --- /dev/null +++ b/src/openai/types/evals/eval_jsonl_file_content_source_param.py @@ -0,0 +1,22 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Iterable +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["EvalJSONLFileContentSourceParam", "Content"] + + +class Content(TypedDict, total=False): + item: Required[Dict[str, object]] + + sample: Dict[str, object] + + +class EvalJSONLFileContentSourceParam(TypedDict, total=False): + content: Required[Iterable[Content]] + """The content of the jsonl file.""" + + type: Required[Literal["file_content"]] + """The type of jsonl source. Always `file_content`.""" diff --git a/src/openai/types/evals/eval_jsonl_file_id_source.py b/src/openai/types/evals/eval_jsonl_file_id_source.py new file mode 100644 index 0000000000..2d317f2ce1 --- /dev/null +++ b/src/openai/types/evals/eval_jsonl_file_id_source.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["EvalJSONLFileIDSource"] + + +class EvalJSONLFileIDSource(BaseModel): + id: str + """The identifier of the file.""" + + type: Literal["file_id"] + """The type of jsonl source. 
Always `file_id`.""" diff --git a/src/openai/types/evals/eval_jsonl_file_id_source_param.py b/src/openai/types/evals/eval_jsonl_file_id_source_param.py new file mode 100644 index 0000000000..76b8662cd6 --- /dev/null +++ b/src/openai/types/evals/eval_jsonl_file_id_source_param.py @@ -0,0 +1,15 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["EvalJSONLFileIDSourceParam"] + + +class EvalJSONLFileIDSourceParam(TypedDict, total=False): + id: Required[str] + """The identifier of the file.""" + + type: Required[Literal["file_id"]] + """The type of jsonl source. Always `file_id`.""" diff --git a/src/openai/types/graders/label_model_grader.py b/src/openai/types/graders/label_model_grader.py index d95ccc6df6..16f5b5aa1b 100644 --- a/src/openai/types/graders/label_model_grader.py +++ b/src/openai/types/graders/label_model_grader.py @@ -1,41 +1,16 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Union, Optional -from typing_extensions import Literal, TypeAlias +from typing import List +from typing_extensions import Literal from ..._models import BaseModel -from ..responses.response_input_text import ResponseInputText +from ..shared.eval_item import EvalItem -__all__ = ["LabelModelGrader", "Input", "InputContent", "InputContentOutputText"] - - -class InputContentOutputText(BaseModel): - text: str - """The text output from the model.""" - - type: Literal["output_text"] - """The type of the output text. Always `output_text`.""" - - -InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText] - - -class Input(BaseModel): - content: InputContent - """Text inputs to the model - can contain template strings.""" - - role: Literal["user", "assistant", "system", "developer"] - """The role of the message input. - - One of `user`, `assistant`, `system`, or `developer`. - """ - - type: Optional[Literal["message"]] = None - """The type of the message input. Always `message`.""" +__all__ = ["LabelModelGrader"] class LabelModelGrader(BaseModel): - input: List[Input] + input: List[EvalItem] labels: List[str] """The labels to assign to each item in the evaluation.""" diff --git a/src/openai/types/graders/label_model_grader_param.py b/src/openai/types/graders/label_model_grader_param.py index 76d01421ee..34f5de7726 100644 --- a/src/openai/types/graders/label_model_grader_param.py +++ b/src/openai/types/graders/label_model_grader_param.py @@ -2,41 +2,16 @@ from __future__ import annotations -from typing import List, Union, Iterable -from typing_extensions import Literal, Required, TypeAlias, TypedDict +from typing import List, Iterable +from typing_extensions import Literal, Required, TypedDict -from ..responses.response_input_text_param import ResponseInputTextParam +from ..shared_params.eval_item import EvalItem -__all__ = ["LabelModelGraderParam", "Input", "InputContent", "InputContentOutputText"] - - -class InputContentOutputText(TypedDict, total=False): - text: Required[str] - """The text output from the model.""" - - type: Required[Literal["output_text"]] - """The type of the output text. 
Always `output_text`.""" - - -InputContent: TypeAlias = Union[str, ResponseInputTextParam, InputContentOutputText] - - -class Input(TypedDict, total=False): - content: Required[InputContent] - """Text inputs to the model - can contain template strings.""" - - role: Required[Literal["user", "assistant", "system", "developer"]] - """The role of the message input. - - One of `user`, `assistant`, `system`, or `developer`. - """ - - type: Literal["message"] - """The type of the message input. Always `message`.""" +__all__ = ["LabelModelGraderParam"] class LabelModelGraderParam(TypedDict, total=False): - input: Required[Iterable[Input]] + input: Required[Iterable[EvalItem]] labels: Required[List[str]] """The labels to assign to each item in the evaluation.""" diff --git a/src/openai/types/graders/score_model_grader.py b/src/openai/types/graders/score_model_grader.py index 1349f75a58..6d81019c26 100644 --- a/src/openai/types/graders/score_model_grader.py +++ b/src/openai/types/graders/score_model_grader.py @@ -1,41 +1,16 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Union, Optional -from typing_extensions import Literal, TypeAlias +from typing import List, Optional +from typing_extensions import Literal from ..._models import BaseModel -from ..responses.response_input_text import ResponseInputText +from ..shared.eval_item import EvalItem -__all__ = ["ScoreModelGrader", "Input", "InputContent", "InputContentOutputText"] - - -class InputContentOutputText(BaseModel): - text: str - """The text output from the model.""" - - type: Literal["output_text"] - """The type of the output text. Always `output_text`.""" - - -InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText] - - -class Input(BaseModel): - content: InputContent - """Text inputs to the model - can contain template strings.""" - - role: Literal["user", "assistant", "system", "developer"] - """The role of the message input. - - One of `user`, `assistant`, `system`, or `developer`. - """ - - type: Optional[Literal["message"]] = None - """The type of the message input. Always `message`.""" +__all__ = ["ScoreModelGrader"] class ScoreModelGrader(BaseModel): - input: List[Input] + input: List[EvalItem] """The input text. This may include template strings.""" model: str diff --git a/src/openai/types/graders/score_model_grader_param.py b/src/openai/types/graders/score_model_grader_param.py index 673f14e47d..3e0b9d08eb 100644 --- a/src/openai/types/graders/score_model_grader_param.py +++ b/src/openai/types/graders/score_model_grader_param.py @@ -2,41 +2,16 @@ from __future__ import annotations -from typing import Union, Iterable -from typing_extensions import Literal, Required, TypeAlias, TypedDict +from typing import Iterable +from typing_extensions import Literal, Required, TypedDict -from ..responses.response_input_text_param import ResponseInputTextParam +from ..shared_params.eval_item import EvalItem -__all__ = ["ScoreModelGraderParam", "Input", "InputContent", "InputContentOutputText"] - - -class InputContentOutputText(TypedDict, total=False): - text: Required[str] - """The text output from the model.""" - - type: Required[Literal["output_text"]] - """The type of the output text. 
Always `output_text`.""" - - -InputContent: TypeAlias = Union[str, ResponseInputTextParam, InputContentOutputText] - - -class Input(TypedDict, total=False): - content: Required[InputContent] - """Text inputs to the model - can contain template strings.""" - - role: Required[Literal["user", "assistant", "system", "developer"]] - """The role of the message input. - - One of `user`, `assistant`, `system`, or `developer`. - """ - - type: Literal["message"] - """The type of the message input. Always `message`.""" +__all__ = ["ScoreModelGraderParam"] class ScoreModelGraderParam(TypedDict, total=False): - input: Required[Iterable[Input]] + input: Required[Iterable[EvalItem]] """The input text. This may include template strings.""" model: Required[str] diff --git a/src/openai/types/shared/__init__.py b/src/openai/types/shared/__init__.py index 6ad0ed5e01..10450d8c70 100644 --- a/src/openai/types/shared/__init__.py +++ b/src/openai/types/shared/__init__.py @@ -1,6 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from .metadata import Metadata as Metadata +from .eval_item import EvalItem as EvalItem from .reasoning import Reasoning as Reasoning from .all_models import AllModels as AllModels from .chat_model import ChatModel as ChatModel diff --git a/src/openai/types/shared/eval_item.py b/src/openai/types/shared/eval_item.py new file mode 100644 index 0000000000..f235d1ef17 --- /dev/null +++ b/src/openai/types/shared/eval_item.py @@ -0,0 +1,34 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Union, Optional +from typing_extensions import Literal, TypeAlias + +from ..._models import BaseModel +from ..responses.response_input_text import ResponseInputText + +__all__ = ["EvalItem", "Content", "ContentOutputText"] + + +class ContentOutputText(BaseModel): + text: str + """The text output from the model.""" + + type: Literal["output_text"] + """The type of the output text. Always `output_text`.""" + + +Content: TypeAlias = Union[str, ResponseInputText, ContentOutputText] + + +class EvalItem(BaseModel): + content: Content + """Text inputs to the model - can contain template strings.""" + + role: Literal["user", "assistant", "system", "developer"] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. + """ + + type: Optional[Literal["message"]] = None + """The type of the message input. Always `message`.""" diff --git a/src/openai/types/shared_params/__init__.py b/src/openai/types/shared_params/__init__.py index 8894710807..68d16b90dc 100644 --- a/src/openai/types/shared_params/__init__.py +++ b/src/openai/types/shared_params/__init__.py @@ -1,6 +1,7 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from .metadata import Metadata as Metadata +from .eval_item import EvalItem as EvalItem from .reasoning import Reasoning as Reasoning from .chat_model import ChatModel as ChatModel from .compound_filter import CompoundFilter as CompoundFilter diff --git a/src/openai/types/shared_params/eval_item.py b/src/openai/types/shared_params/eval_item.py new file mode 100644 index 0000000000..7740ccc165 --- /dev/null +++ b/src/openai/types/shared_params/eval_item.py @@ -0,0 +1,35 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
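# A short sketch (assumed usage, not part of the generated module): EvalItem is
# the shared message shape that the graders and eval params above now reuse.
# As a TypedDict it can be written as a plain dict, and `content` may carry
# template strings; the "{{ item.text }}" variable below is only an example.
from openai.types.shared_params import EvalItem

message: EvalItem = {
    "role": "user",
    "content": "Classify the sentiment of: {{ item.text }}",
    "type": "message",
}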
+ +from __future__ import annotations + +from typing import Union +from typing_extensions import Literal, Required, TypeAlias, TypedDict + +from ..responses.response_input_text_param import ResponseInputTextParam + +__all__ = ["EvalItem", "Content", "ContentOutputText"] + + +class ContentOutputText(TypedDict, total=False): + text: Required[str] + """The text output from the model.""" + + type: Required[Literal["output_text"]] + """The type of the output text. Always `output_text`.""" + + +Content: TypeAlias = Union[str, ResponseInputTextParam, ContentOutputText] + + +class EvalItem(TypedDict, total=False): + content: Required[Content] + """Text inputs to the model - can contain template strings.""" + + role: Required[Literal["user", "assistant", "system", "developer"]] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. + """ + + type: Literal["message"] + """The type of the message input. Always `message`.""" diff --git a/src/openai/types/vector_store.py b/src/openai/types/vector_store.py index 2473a442d2..2af120350e 100644 --- a/src/openai/types/vector_store.py +++ b/src/openai/types/vector_store.py @@ -5,8 +5,9 @@ from .._models import BaseModel from .shared.metadata import Metadata +from .vector_store_expiration_after import VectorStoreExpirationAfter -__all__ = ["VectorStore", "FileCounts", "ExpiresAfter"] +__all__ = ["VectorStore", "FileCounts"] class FileCounts(BaseModel): @@ -26,17 +27,6 @@ class FileCounts(BaseModel): """The total number of files.""" -class ExpiresAfter(BaseModel): - anchor: Literal["last_active_at"] - """Anchor timestamp after which the expiration policy applies. - - Supported anchors: `last_active_at`. - """ - - days: int - """The number of days after the anchor time that the vector store will expire.""" - - class VectorStore(BaseModel): id: str """The identifier, which can be referenced in API endpoints.""" @@ -75,7 +65,7 @@ class VectorStore(BaseModel): usage_bytes: int """The total number of bytes used by the files in the vector store.""" - expires_after: Optional[ExpiresAfter] = None + expires_after: Optional[VectorStoreExpirationAfter] = None """The expiration policy for a vector store.""" expires_at: Optional[int] = None diff --git a/src/openai/types/vector_store_create_params.py b/src/openai/types/vector_store_create_params.py index 365d0936b1..dbcedac188 100644 --- a/src/openai/types/vector_store_create_params.py +++ b/src/openai/types/vector_store_create_params.py @@ -3,12 +3,13 @@ from __future__ import annotations from typing import List, Optional -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import TypedDict from .shared_params.metadata import Metadata from .file_chunking_strategy_param import FileChunkingStrategyParam +from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam -__all__ = ["VectorStoreCreateParams", "ExpiresAfter"] +__all__ = ["VectorStoreCreateParams"] class VectorStoreCreateParams(TypedDict, total=False): @@ -19,7 +20,7 @@ class VectorStoreCreateParams(TypedDict, total=False): non-empty. """ - expires_after: ExpiresAfter + expires_after: VectorStoreExpirationAfterParam """The expiration policy for a vector store.""" file_ids: List[str] @@ -41,14 +42,3 @@ class VectorStoreCreateParams(TypedDict, total=False): name: str """The name of the vector store.""" - - -class ExpiresAfter(TypedDict, total=False): - anchor: Required[Literal["last_active_at"]] - """Anchor timestamp after which the expiration policy applies. 
- - Supported anchors: `last_active_at`. - """ - - days: Required[int] - """The number of days after the anchor time that the vector store will expire.""" diff --git a/src/openai/types/vector_store_expiration_after.py b/src/openai/types/vector_store_expiration_after.py new file mode 100644 index 0000000000..1d417d526b --- /dev/null +++ b/src/openai/types/vector_store_expiration_after.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from .._models import BaseModel + +__all__ = ["VectorStoreExpirationAfter"] + + +class VectorStoreExpirationAfter(BaseModel): + anchor: Literal["last_active_at"] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `last_active_at`. + """ + + days: int + """The number of days after the anchor time that the vector store will expire.""" diff --git a/src/openai/types/vector_store_expiration_after_param.py b/src/openai/types/vector_store_expiration_after_param.py new file mode 100644 index 0000000000..29a008c713 --- /dev/null +++ b/src/openai/types/vector_store_expiration_after_param.py @@ -0,0 +1,18 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["VectorStoreExpirationAfterParam"] + + +class VectorStoreExpirationAfterParam(TypedDict, total=False): + anchor: Required[Literal["last_active_at"]] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `last_active_at`. + """ + + days: Required[int] + """The number of days after the anchor time that the vector store will expire.""" diff --git a/src/openai/types/vector_store_update_params.py b/src/openai/types/vector_store_update_params.py index 4f6ac63963..7c90784dfd 100644 --- a/src/openai/types/vector_store_update_params.py +++ b/src/openai/types/vector_store_update_params.py @@ -3,15 +3,16 @@ from __future__ import annotations from typing import Optional -from typing_extensions import Literal, Required, TypedDict +from typing_extensions import TypedDict from .shared_params.metadata import Metadata +from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam -__all__ = ["VectorStoreUpdateParams", "ExpiresAfter"] +__all__ = ["VectorStoreUpdateParams"] class VectorStoreUpdateParams(TypedDict, total=False): - expires_after: Optional[ExpiresAfter] + expires_after: Optional[VectorStoreExpirationAfterParam] """The expiration policy for a vector store.""" metadata: Optional[Metadata] @@ -26,14 +27,3 @@ class VectorStoreUpdateParams(TypedDict, total=False): name: Optional[str] """The name of the vector store.""" - - -class ExpiresAfter(TypedDict, total=False): - anchor: Required[Literal["last_active_at"]] - """Anchor timestamp after which the expiration policy applies. - - Supported anchors: `last_active_at`. 
- """ - - days: Required[int] - """The number of days after the anchor time that the vector store will expire.""" From 98ba7d355551213a13803f68d5642eecbb4ffd39 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 17:11:07 +0000 Subject: [PATCH 5/8] feat(api): Updating Assistants and Evals API schemas --- .stats.yml | 6 +- api.md | 7 - .../resources/beta/threads/runs/runs.py | 17 +- src/openai/resources/beta/threads/threads.py | 17 +- .../resources/vector_stores/vector_stores.py | 9 +- src/openai/types/__init__.py | 4 - src/openai/types/beta/__init__.py | 2 - .../beta/thread_create_and_run_params.py | 21 +- src/openai/types/beta/threads/run.py | 30 ++- .../types/beta/threads/run_create_params.py | 21 +- src/openai/types/beta/truncation_object.py | 25 -- .../types/beta/truncation_object_param.py | 25 -- src/openai/types/eval_create_params.py | 36 ++- src/openai/types/eval_create_response.py | 30 ++- src/openai/types/eval_list_response.py | 30 ++- .../types/eval_logs_data_source_config.py | 32 --- src/openai/types/eval_retrieve_response.py | 30 ++- src/openai/types/eval_update_response.py | 30 ++- src/openai/types/evals/__init__.py | 8 - ...create_eval_completions_run_data_source.py | 67 +++++- ..._eval_completions_run_data_source_param.py | 66 +++++- .../create_eval_jsonl_run_data_source.py | 33 ++- ...create_eval_jsonl_run_data_source_param.py | 36 ++- .../create_eval_responses_run_data_source.py | 151 ------------ ...te_eval_responses_run_data_source_param.py | 147 ------------ .../evals/eval_jsonl_file_content_source.py | 22 -- .../eval_jsonl_file_content_source_param.py | 22 -- .../types/evals/eval_jsonl_file_id_source.py | 15 -- .../evals/eval_jsonl_file_id_source_param.py | 15 -- src/openai/types/evals/run_cancel_response.py | 213 ++++++++++++++++- src/openai/types/evals/run_create_params.py | 218 +++++++++++++++++- src/openai/types/evals/run_create_response.py | 213 ++++++++++++++++- src/openai/types/evals/run_list_response.py | 213 ++++++++++++++++- .../types/evals/run_retrieve_response.py | 213 ++++++++++++++++- .../types/graders/label_model_grader.py | 35 ++- .../types/graders/label_model_grader_param.py | 35 ++- src/openai/types/graders/multi_grader.py | 2 +- .../types/graders/multi_grader_param.py | 2 +- .../types/graders/score_model_grader.py | 35 ++- .../types/graders/score_model_grader_param.py | 35 ++- src/openai/types/shared/__init__.py | 1 - src/openai/types/shared/chat_model.py | 1 + src/openai/types/shared/eval_item.py | 34 --- src/openai/types/shared_params/__init__.py | 1 - src/openai/types/shared_params/chat_model.py | 1 + src/openai/types/shared_params/eval_item.py | 35 --- src/openai/types/vector_store.py | 16 +- .../types/vector_store_create_params.py | 18 +- .../types/vector_store_expiration_after.py | 18 -- .../vector_store_expiration_after_param.py | 18 -- .../types/vector_store_update_params.py | 18 +- 51 files changed, 1621 insertions(+), 708 deletions(-) delete mode 100644 src/openai/types/beta/truncation_object.py delete mode 100644 src/openai/types/beta/truncation_object_param.py delete mode 100644 src/openai/types/eval_logs_data_source_config.py delete mode 100644 src/openai/types/evals/create_eval_responses_run_data_source.py delete mode 100644 src/openai/types/evals/create_eval_responses_run_data_source_param.py delete mode 100644 src/openai/types/evals/eval_jsonl_file_content_source.py delete mode 100644 src/openai/types/evals/eval_jsonl_file_content_source_param.py delete mode 
100644 src/openai/types/evals/eval_jsonl_file_id_source.py delete mode 100644 src/openai/types/evals/eval_jsonl_file_id_source_param.py delete mode 100644 src/openai/types/shared/eval_item.py delete mode 100644 src/openai/types/shared_params/eval_item.py delete mode 100644 src/openai/types/vector_store_expiration_after.py delete mode 100644 src/openai/types/vector_store_expiration_after_param.py diff --git a/.stats.yml b/.stats.yml index 202b915dc8..a3c5d081d4 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 101 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-161ca7f1cfd7b33c1fc07d0ce25dfe4be5a7271c394f4cb526b7fb21b0729900.yml -openapi_spec_hash: 602e14add4bee018c6774e320ce309b8 -config_hash: bdacc55eb995c15255ec82130eb8c3bb +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5fa16b9a02985ae06e41be14946a9c325dc672fb014b3c19abca65880c6990e6.yml +openapi_spec_hash: da3e669f65130043b1170048c0727890 +config_hash: d8d5fda350f6db77c784f35429741a2e diff --git a/api.md b/api.md index 869b7d5042..496e5548b3 100644 --- a/api.md +++ b/api.md @@ -7,7 +7,6 @@ from openai.types import ( ComparisonFilter, CompoundFilter, ErrorObject, - EvalItem, FunctionDefinition, FunctionParameters, Metadata, @@ -344,7 +343,6 @@ from openai.types import ( StaticFileChunkingStrategyObjectParam, VectorStore, VectorStoreDeleted, - VectorStoreExpirationAfter, VectorStoreSearchResponse, ) ``` @@ -521,7 +519,6 @@ from openai.types.beta import ( AssistantToolChoiceOption, Thread, ThreadDeleted, - TruncationObject, ) ``` @@ -790,7 +787,6 @@ Types: ```python from openai.types import ( EvalCustomDataSourceConfig, - EvalLogsDataSourceConfig, EvalStoredCompletionsDataSourceConfig, EvalCreateResponse, EvalRetrieveResponse, @@ -816,10 +812,7 @@ Types: from openai.types.evals import ( CreateEvalCompletionsRunDataSource, CreateEvalJSONLRunDataSource, - CreateEvalResponsesRunDataSource, EvalAPIError, - EvalJSONLFileContentSource, - EvalJSONLFileIDSource, RunCreateResponse, RunRetrieveResponse, RunListResponse, diff --git a/src/openai/resources/beta/threads/runs/runs.py b/src/openai/resources/beta/threads/runs/runs.py index f59fda8d5f..4d19010fea 100644 --- a/src/openai/resources/beta/threads/runs/runs.py +++ b/src/openai/resources/beta/threads/runs/runs.py @@ -51,7 +51,6 @@ from .....types.shared.reasoning_effort import ReasoningEffort from .....types.beta.assistant_tool_param import AssistantToolParam from .....types.beta.assistant_stream_event import AssistantStreamEvent -from .....types.beta.truncation_object_param import TruncationObjectParam from .....types.beta.threads.runs.run_step_include import RunStepInclude from .....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam from .....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -105,7 +104,7 @@ def create( tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -255,7 +254,7 @@ def create( tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -405,7 +404,7 @@ def create( tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -555,7 +554,7 @@ def create( tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1516,7 +1515,7 @@ async def create( tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1666,7 +1665,7 @@ async def create( tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -1816,7 +1815,7 @@ async def create( tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1966,7 +1965,7 @@ async def create( tool_choice: Optional[AssistantToolChoiceOptionParam] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[run_create_params.TruncationStrategy] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, diff --git a/src/openai/resources/beta/threads/threads.py b/src/openai/resources/beta/threads/threads.py index ec5a8ea2cf..13d8cb6411 100644 --- a/src/openai/resources/beta/threads/threads.py +++ b/src/openai/resources/beta/threads/threads.py @@ -52,7 +52,6 @@ from ....types.shared_params.metadata import Metadata from ....types.beta.assistant_tool_param import AssistantToolParam from ....types.beta.assistant_stream_event import AssistantStreamEvent -from ....types.beta.truncation_object_param import TruncationObjectParam from ....types.beta.assistant_tool_choice_option_param import AssistantToolChoiceOptionParam from ....types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam @@ -286,7 +285,7 @@ def create_and_run( tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -419,7 +418,7 @@ def create_and_run( tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -552,7 +551,7 @@ def create_and_run( tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -685,7 +684,7 @@ def create_and_run( tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1135,7 +1134,7 @@ async def create_and_run( tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1268,7 +1267,7 @@ async def create_and_run( tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, @@ -1401,7 +1400,7 @@ async def create_and_run( tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. 
extra_headers: Headers | None = None, @@ -1534,7 +1533,7 @@ async def create_and_run( tool_resources: Optional[thread_create_and_run_params.ToolResources] | NotGiven = NOT_GIVEN, tools: Optional[Iterable[AssistantToolParam]] | NotGiven = NOT_GIVEN, top_p: Optional[float] | NotGiven = NOT_GIVEN, - truncation_strategy: Optional[TruncationObjectParam] | NotGiven = NOT_GIVEN, + truncation_strategy: Optional[thread_create_and_run_params.TruncationStrategy] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. # The extra values given here take precedence over values defined on the client or passed to this method. extra_headers: Headers | None = None, diff --git a/src/openai/resources/vector_stores/vector_stores.py b/src/openai/resources/vector_stores/vector_stores.py index 7f353af080..9fc17b183b 100644 --- a/src/openai/resources/vector_stores/vector_stores.py +++ b/src/openai/resources/vector_stores/vector_stores.py @@ -43,7 +43,6 @@ from ...types.shared_params.metadata import Metadata from ...types.file_chunking_strategy_param import FileChunkingStrategyParam from ...types.vector_store_search_response import VectorStoreSearchResponse -from ...types.vector_store_expiration_after_param import VectorStoreExpirationAfterParam __all__ = ["VectorStores", "AsyncVectorStores"] @@ -80,7 +79,7 @@ def create( self, *, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, - expires_after: VectorStoreExpirationAfterParam | NotGiven = NOT_GIVEN, + expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, @@ -178,7 +177,7 @@ def update( self, vector_store_id: str, *, - expires_after: Optional[VectorStoreExpirationAfterParam] | NotGiven = NOT_GIVEN, + expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. @@ -425,7 +424,7 @@ async def create( self, *, chunking_strategy: FileChunkingStrategyParam | NotGiven = NOT_GIVEN, - expires_after: VectorStoreExpirationAfterParam | NotGiven = NOT_GIVEN, + expires_after: vector_store_create_params.ExpiresAfter | NotGiven = NOT_GIVEN, file_ids: List[str] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: str | NotGiven = NOT_GIVEN, @@ -523,7 +522,7 @@ async def update( self, vector_store_id: str, *, - expires_after: Optional[VectorStoreExpirationAfterParam] | NotGiven = NOT_GIVEN, + expires_after: Optional[vector_store_update_params.ExpiresAfter] | NotGiven = NOT_GIVEN, metadata: Optional[Metadata] | NotGiven = NOT_GIVEN, name: Optional[str] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
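The vector-store hunks above make the same kind of swap, replacing the standalone `VectorStoreExpirationAfterParam` with the inlined `ExpiresAfter` TypedDicts in the params modules. A call-site sketch under assumed placeholder values (not from this patch):

from openai import OpenAI

client = OpenAI()

# `expires_after` is still a plain dict; the API requires
# `anchor` to be "last_active_at".
store = client.vector_stores.create(
    name="support-docs",  # placeholder name
    expires_after={"anchor": "last_active_at", "days": 7},
)

# The update variant accepts the same shape (typed Optional).
client.vector_stores.update(
    vector_store_id=store.id,
    expires_after={"anchor": "last_active_at", "days": 30},
)
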
diff --git a/src/openai/types/__init__.py b/src/openai/types/__init__.py index de6665155f..bf5493fd62 100644 --- a/src/openai/types/__init__.py +++ b/src/openai/types/__init__.py @@ -6,7 +6,6 @@ from .image import Image as Image from .model import Model as Model from .shared import ( - EvalItem as EvalItem, Metadata as Metadata, AllModels as AllModels, ChatModel as ChatModel, @@ -71,20 +70,17 @@ from .vector_store_search_params import VectorStoreSearchParams as VectorStoreSearchParams from .vector_store_update_params import VectorStoreUpdateParams as VectorStoreUpdateParams from .moderation_text_input_param import ModerationTextInputParam as ModerationTextInputParam -from .eval_logs_data_source_config import EvalLogsDataSourceConfig as EvalLogsDataSourceConfig from .file_chunking_strategy_param import FileChunkingStrategyParam as FileChunkingStrategyParam from .vector_store_search_response import VectorStoreSearchResponse as VectorStoreSearchResponse from .websocket_connection_options import WebsocketConnectionOptions as WebsocketConnectionOptions from .image_create_variation_params import ImageCreateVariationParams as ImageCreateVariationParams from .static_file_chunking_strategy import StaticFileChunkingStrategy as StaticFileChunkingStrategy -from .vector_store_expiration_after import VectorStoreExpirationAfter as VectorStoreExpirationAfter from .eval_custom_data_source_config import EvalCustomDataSourceConfig as EvalCustomDataSourceConfig from .moderation_image_url_input_param import ModerationImageURLInputParam as ModerationImageURLInputParam from .auto_file_chunking_strategy_param import AutoFileChunkingStrategyParam as AutoFileChunkingStrategyParam from .moderation_multi_modal_input_param import ModerationMultiModalInputParam as ModerationMultiModalInputParam from .other_file_chunking_strategy_object import OtherFileChunkingStrategyObject as OtherFileChunkingStrategyObject from .static_file_chunking_strategy_param import StaticFileChunkingStrategyParam as StaticFileChunkingStrategyParam -from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam as VectorStoreExpirationAfterParam from .static_file_chunking_strategy_object import StaticFileChunkingStrategyObject as StaticFileChunkingStrategyObject from .eval_stored_completions_data_source_config import ( EvalStoredCompletionsDataSourceConfig as EvalStoredCompletionsDataSourceConfig, diff --git a/src/openai/types/beta/__init__.py b/src/openai/types/beta/__init__.py index bfcaed7532..5ba3eadf3c 100644 --- a/src/openai/types/beta/__init__.py +++ b/src/openai/types/beta/__init__.py @@ -9,7 +9,6 @@ from .thread_deleted import ThreadDeleted as ThreadDeleted from .file_search_tool import FileSearchTool as FileSearchTool from .assistant_deleted import AssistantDeleted as AssistantDeleted -from .truncation_object import TruncationObject as TruncationObject from .function_tool_param import FunctionToolParam as FunctionToolParam from .assistant_tool_param import AssistantToolParam as AssistantToolParam from .thread_create_params import ThreadCreateParams as ThreadCreateParams @@ -21,7 +20,6 @@ from .file_search_tool_param import FileSearchToolParam as FileSearchToolParam from .assistant_create_params import AssistantCreateParams as AssistantCreateParams from .assistant_update_params import AssistantUpdateParams as AssistantUpdateParams -from .truncation_object_param import TruncationObjectParam as TruncationObjectParam from .assistant_tool_choice_param import AssistantToolChoiceParam as AssistantToolChoiceParam from 
.code_interpreter_tool_param import CodeInterpreterToolParam as CodeInterpreterToolParam from .assistant_tool_choice_option import AssistantToolChoiceOption as AssistantToolChoiceOption diff --git a/src/openai/types/beta/thread_create_and_run_params.py b/src/openai/types/beta/thread_create_and_run_params.py index 7ba71b0ba3..d813710579 100644 --- a/src/openai/types/beta/thread_create_and_run_params.py +++ b/src/openai/types/beta/thread_create_and_run_params.py @@ -8,7 +8,6 @@ from ..shared.chat_model import ChatModel from .assistant_tool_param import AssistantToolParam from ..shared_params.metadata import Metadata -from .truncation_object_param import TruncationObjectParam from .code_interpreter_tool_param import CodeInterpreterToolParam from .assistant_tool_choice_option_param import AssistantToolChoiceOptionParam from .threads.message_content_part_param import MessageContentPartParam @@ -32,6 +31,7 @@ "ToolResources", "ToolResourcesCodeInterpreter", "ToolResourcesFileSearch", + "TruncationStrategy", "ThreadCreateAndRunParamsNonStreaming", "ThreadCreateAndRunParamsStreaming", ] @@ -166,7 +166,7 @@ class ThreadCreateAndRunParamsBase(TypedDict, total=False): We generally recommend altering this or temperature but not both. """ - truncation_strategy: Optional[TruncationObjectParam] + truncation_strategy: Optional[TruncationStrategy] """Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. @@ -358,6 +358,23 @@ class ToolResources(TypedDict, total=False): file_search: ToolResourcesFileSearch +class TruncationStrategy(TypedDict, total=False): + type: Required[Literal["auto", "last_messages"]] + """The truncation strategy to use for the thread. + + The default is `auto`. If set to `last_messages`, the thread will be truncated + to the n most recent messages in the thread. When set to `auto`, messages in the + middle of the thread will be dropped to fit the context length of the model, + `max_prompt_tokens`. + """ + + last_messages: Optional[int] + """ + The number of most recent messages from the thread when constructing the context + for the run. + """ + + class ThreadCreateAndRunParamsNonStreaming(ThreadCreateAndRunParamsBase, total=False): stream: Optional[Literal[False]] """ diff --git a/src/openai/types/beta/threads/run.py b/src/openai/types/beta/threads/run.py index e5a7808417..da9418d6f9 100644 --- a/src/openai/types/beta/threads/run.py +++ b/src/openai/types/beta/threads/run.py @@ -7,12 +7,19 @@ from .run_status import RunStatus from ..assistant_tool import AssistantTool from ...shared.metadata import Metadata -from ..truncation_object import TruncationObject from ..assistant_tool_choice_option import AssistantToolChoiceOption from ..assistant_response_format_option import AssistantResponseFormatOption from .required_action_function_tool_call import RequiredActionFunctionToolCall -__all__ = ["Run", "IncompleteDetails", "LastError", "RequiredAction", "RequiredActionSubmitToolOutputs", "Usage"] +__all__ = [ + "Run", + "IncompleteDetails", + "LastError", + "RequiredAction", + "RequiredActionSubmitToolOutputs", + "TruncationStrategy", + "Usage", +] class IncompleteDetails(BaseModel): @@ -45,6 +52,23 @@ class RequiredAction(BaseModel): """For now, this is always `submit_tool_outputs`.""" +class TruncationStrategy(BaseModel): + type: Literal["auto", "last_messages"] + """The truncation strategy to use for the thread. + + The default is `auto`. 
If set to `last_messages`, the thread will be truncated + to the n most recent messages in the thread. When set to `auto`, messages in the + middle of the thread will be dropped to fit the context length of the model, + `max_prompt_tokens`. + """ + + last_messages: Optional[int] = None + """ + The number of most recent messages from the thread when constructing the context + for the run. + """ + + class Usage(BaseModel): completion_tokens: int """Number of completion tokens used over the course of the run.""" @@ -201,7 +225,7 @@ class Run(BaseModel): this run. """ - truncation_strategy: Optional[TruncationObject] = None + truncation_strategy: Optional[TruncationStrategy] = None """Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. diff --git a/src/openai/types/beta/threads/run_create_params.py b/src/openai/types/beta/threads/run_create_params.py index 80656aada4..fc70227862 100644 --- a/src/openai/types/beta/threads/run_create_params.py +++ b/src/openai/types/beta/threads/run_create_params.py @@ -9,7 +9,6 @@ from ..assistant_tool_param import AssistantToolParam from .runs.run_step_include import RunStepInclude from ...shared_params.metadata import Metadata -from ..truncation_object_param import TruncationObjectParam from ...shared.reasoning_effort import ReasoningEffort from .message_content_part_param import MessageContentPartParam from ..code_interpreter_tool_param import CodeInterpreterToolParam @@ -22,6 +21,7 @@ "AdditionalMessageAttachment", "AdditionalMessageAttachmentTool", "AdditionalMessageAttachmentToolFileSearch", + "TruncationStrategy", "RunCreateParamsNonStreaming", "RunCreateParamsStreaming", ] @@ -173,7 +173,7 @@ class RunCreateParamsBase(TypedDict, total=False): We generally recommend altering this or temperature but not both. """ - truncation_strategy: Optional[TruncationObjectParam] + truncation_strategy: Optional[TruncationStrategy] """Controls for how a thread will be truncated prior to the run. Use this to control the intial context window of the run. @@ -223,6 +223,23 @@ class AdditionalMessage(TypedDict, total=False): """ +class TruncationStrategy(TypedDict, total=False): + type: Required[Literal["auto", "last_messages"]] + """The truncation strategy to use for the thread. + + The default is `auto`. If set to `last_messages`, the thread will be truncated + to the n most recent messages in the thread. When set to `auto`, messages in the + middle of the thread will be dropped to fit the context length of the model, + `max_prompt_tokens`. + """ + + last_messages: Optional[int] + """ + The number of most recent messages from the thread when constructing the context + for the run. + """ + + class RunCreateParamsNonStreaming(RunCreateParamsBase, total=False): stream: Optional[Literal[False]] """ diff --git a/src/openai/types/beta/truncation_object.py b/src/openai/types/beta/truncation_object.py deleted file mode 100644 index 7c81b3b5bc..0000000000 --- a/src/openai/types/beta/truncation_object.py +++ /dev/null @@ -1,25 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["TruncationObject"] - - -class TruncationObject(BaseModel): - type: Literal["auto", "last_messages"] - """The truncation strategy to use for the thread. - - The default is `auto`. 
If set to `last_messages`, the thread will be truncated - to the n most recent messages in the thread. When set to `auto`, messages in the - middle of the thread will be dropped to fit the context length of the model, - `max_prompt_tokens`. - """ - - last_messages: Optional[int] = None - """ - The number of most recent messages from the thread when constructing the context - for the run. - """ diff --git a/src/openai/types/beta/truncation_object_param.py b/src/openai/types/beta/truncation_object_param.py deleted file mode 100644 index 98d942fa09..0000000000 --- a/src/openai/types/beta/truncation_object_param.py +++ /dev/null @@ -1,25 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Optional -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["TruncationObjectParam"] - - -class TruncationObjectParam(TypedDict, total=False): - type: Required[Literal["auto", "last_messages"]] - """The truncation strategy to use for the thread. - - The default is `auto`. If set to `last_messages`, the thread will be truncated - to the n most recent messages in the thread. When set to `auto`, messages in the - middle of the thread will be dropped to fit the context length of the model, - `max_prompt_tokens`. - """ - - last_messages: Optional[int] - """ - The number of most recent messages from the thread when constructing the context - for the run. - """ diff --git a/src/openai/types/eval_create_params.py b/src/openai/types/eval_create_params.py index 95fd0bb8d8..8d508a2d8e 100644 --- a/src/openai/types/eval_create_params.py +++ b/src/openai/types/eval_create_params.py @@ -6,10 +6,10 @@ from typing_extensions import Literal, Required, TypeAlias, TypedDict from .shared_params.metadata import Metadata -from .shared_params.eval_item import EvalItem from .graders.python_grader_param import PythonGraderParam from .graders.score_model_grader_param import ScoreModelGraderParam from .graders.string_check_grader_param import StringCheckGraderParam +from .responses.response_input_text_param import ResponseInputTextParam from .graders.text_similarity_grader_param import TextSimilarityGraderParam __all__ = [ @@ -22,6 +22,9 @@ "TestingCriterionLabelModel", "TestingCriterionLabelModelInput", "TestingCriterionLabelModelInputSimpleInputMessage", + "TestingCriterionLabelModelInputEvalItem", + "TestingCriterionLabelModelInputEvalItemContent", + "TestingCriterionLabelModelInputEvalItemContentOutputText", "TestingCriterionTextSimilarity", "TestingCriterionPython", "TestingCriterionScoreModel", @@ -90,7 +93,36 @@ class TestingCriterionLabelModelInputSimpleInputMessage(TypedDict, total=False): """The role of the message (e.g. "system", "assistant", "user").""" -TestingCriterionLabelModelInput: TypeAlias = Union[TestingCriterionLabelModelInputSimpleInputMessage, EvalItem] +class TestingCriterionLabelModelInputEvalItemContentOutputText(TypedDict, total=False): + text: Required[str] + """The text output from the model.""" + + type: Required[Literal["output_text"]] + """The type of the output text. 
Always `output_text`.""" + + +TestingCriterionLabelModelInputEvalItemContent: TypeAlias = Union[ + str, ResponseInputTextParam, TestingCriterionLabelModelInputEvalItemContentOutputText +] + + +class TestingCriterionLabelModelInputEvalItem(TypedDict, total=False): + content: Required[TestingCriterionLabelModelInputEvalItemContent] + """Text inputs to the model - can contain template strings.""" + + role: Required[Literal["user", "assistant", "system", "developer"]] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. + """ + + type: Literal["message"] + """The type of the message input. Always `message`.""" + + +TestingCriterionLabelModelInput: TypeAlias = Union[ + TestingCriterionLabelModelInputSimpleInputMessage, TestingCriterionLabelModelInputEvalItem +] class TestingCriterionLabelModel(TypedDict, total=False): diff --git a/src/openai/types/eval_create_response.py b/src/openai/types/eval_create_response.py index 2bf7643b53..20b0e3127f 100644 --- a/src/openai/types/eval_create_response.py +++ b/src/openai/types/eval_create_response.py @@ -1,8 +1,10 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Union, Optional +from typing import Dict, List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias +from pydantic import Field as FieldInfo + from .._utils import PropertyInfo from .._models import BaseModel from .shared.metadata import Metadata @@ -10,7 +12,6 @@ from .graders.label_model_grader import LabelModelGrader from .graders.score_model_grader import ScoreModelGrader from .graders.string_check_grader import StringCheckGrader -from .eval_logs_data_source_config import EvalLogsDataSourceConfig from .eval_custom_data_source_config import EvalCustomDataSourceConfig from .graders.text_similarity_grader import TextSimilarityGrader from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig @@ -18,14 +19,37 @@ __all__ = [ "EvalCreateResponse", "DataSourceConfig", + "DataSourceConfigLogs", "TestingCriterion", "TestingCriterionEvalGraderTextSimilarity", "TestingCriterionEvalGraderPython", "TestingCriterionEvalGraderScoreModel", ] + +class DataSourceConfigLogs(BaseModel): + schema_: Dict[str, object] = FieldInfo(alias="schema") + """ + The json schema for the run data source items. Learn how to build JSON schemas + [here](https://json-schema.org/). + """ + + type: Literal["logs"] + """The type of data source. Always `logs`.""" + + metadata: Optional[Metadata] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + DataSourceConfig: TypeAlias = Annotated[ - Union[EvalCustomDataSourceConfig, EvalLogsDataSourceConfig, EvalStoredCompletionsDataSourceConfig], + Union[EvalCustomDataSourceConfig, DataSourceConfigLogs, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/eval_list_response.py b/src/openai/types/eval_list_response.py index e52f3db1c4..5ac4997cf6 100644 --- a/src/openai/types/eval_list_response.py +++ b/src/openai/types/eval_list_response.py @@ -1,8 +1,10 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
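With this change the `logs` data-source config is inlined per response model (here as `DataSourceConfigLogs`) instead of living in the shared `EvalLogsDataSourceConfig`. A hedged sketch of the round trip — it assumes `{"type": "logs"}` is accepted on create (the patch only shows the response side), and the eval name and grader fields are illustrative only:

from openai import OpenAI

client = OpenAI()

evaluation = client.evals.create(
    name="assistant-quality",             # placeholder name
    data_source_config={"type": "logs"},  # derive the item schema from stored logs
    testing_criteria=[
        {
            "type": "string_check",
            "name": "exact-match",
            "input": "{{sample.output_text}}",
            "reference": "{{item.expected_output}}",  # illustrative field name
            "operation": "eq",
        }
    ],
)

if evaluation.data_source_config.type == "logs":
    # `schema` is exposed as `schema_` on the model (aliased to avoid
    # shadowing BaseModel.schema).
    print(evaluation.data_source_config.schema_)
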
-from typing import List, Union, Optional +from typing import Dict, List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias +from pydantic import Field as FieldInfo + from .._utils import PropertyInfo from .._models import BaseModel from .shared.metadata import Metadata @@ -10,7 +12,6 @@ from .graders.label_model_grader import LabelModelGrader from .graders.score_model_grader import ScoreModelGrader from .graders.string_check_grader import StringCheckGrader -from .eval_logs_data_source_config import EvalLogsDataSourceConfig from .eval_custom_data_source_config import EvalCustomDataSourceConfig from .graders.text_similarity_grader import TextSimilarityGrader from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig @@ -18,14 +19,37 @@ __all__ = [ "EvalListResponse", "DataSourceConfig", + "DataSourceConfigLogs", "TestingCriterion", "TestingCriterionEvalGraderTextSimilarity", "TestingCriterionEvalGraderPython", "TestingCriterionEvalGraderScoreModel", ] + +class DataSourceConfigLogs(BaseModel): + schema_: Dict[str, object] = FieldInfo(alias="schema") + """ + The json schema for the run data source items. Learn how to build JSON schemas + [here](https://json-schema.org/). + """ + + type: Literal["logs"] + """The type of data source. Always `logs`.""" + + metadata: Optional[Metadata] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + DataSourceConfig: TypeAlias = Annotated[ - Union[EvalCustomDataSourceConfig, EvalLogsDataSourceConfig, EvalStoredCompletionsDataSourceConfig], + Union[EvalCustomDataSourceConfig, DataSourceConfigLogs, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/eval_logs_data_source_config.py b/src/openai/types/eval_logs_data_source_config.py deleted file mode 100644 index a3eb245e07..0000000000 --- a/src/openai/types/eval_logs_data_source_config.py +++ /dev/null @@ -1,32 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Dict, Optional -from typing_extensions import Literal - -from pydantic import Field as FieldInfo - -from .._models import BaseModel -from .shared.metadata import Metadata - -__all__ = ["EvalLogsDataSourceConfig"] - - -class EvalLogsDataSourceConfig(BaseModel): - schema_: Dict[str, object] = FieldInfo(alias="schema") - """ - The json schema for the run data source items. Learn how to build JSON schemas - [here](https://json-schema.org/). - """ - - type: Literal["logs"] - """The type of data source. Always `logs`.""" - - metadata: Optional[Metadata] = None - """Set of 16 key-value pairs that can be attached to an object. - - This can be useful for storing additional information about the object in a - structured format, and querying for objects via API or the dashboard. - - Keys are strings with a maximum length of 64 characters. Values are strings with - a maximum length of 512 characters. 
- """ diff --git a/src/openai/types/eval_retrieve_response.py b/src/openai/types/eval_retrieve_response.py index 71ed96d5ab..758f9cc040 100644 --- a/src/openai/types/eval_retrieve_response.py +++ b/src/openai/types/eval_retrieve_response.py @@ -1,8 +1,10 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Union, Optional +from typing import Dict, List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias +from pydantic import Field as FieldInfo + from .._utils import PropertyInfo from .._models import BaseModel from .shared.metadata import Metadata @@ -10,7 +12,6 @@ from .graders.label_model_grader import LabelModelGrader from .graders.score_model_grader import ScoreModelGrader from .graders.string_check_grader import StringCheckGrader -from .eval_logs_data_source_config import EvalLogsDataSourceConfig from .eval_custom_data_source_config import EvalCustomDataSourceConfig from .graders.text_similarity_grader import TextSimilarityGrader from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig @@ -18,14 +19,37 @@ __all__ = [ "EvalRetrieveResponse", "DataSourceConfig", + "DataSourceConfigLogs", "TestingCriterion", "TestingCriterionEvalGraderTextSimilarity", "TestingCriterionEvalGraderPython", "TestingCriterionEvalGraderScoreModel", ] + +class DataSourceConfigLogs(BaseModel): + schema_: Dict[str, object] = FieldInfo(alias="schema") + """ + The json schema for the run data source items. Learn how to build JSON schemas + [here](https://json-schema.org/). + """ + + type: Literal["logs"] + """The type of data source. Always `logs`.""" + + metadata: Optional[Metadata] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + DataSourceConfig: TypeAlias = Annotated[ - Union[EvalCustomDataSourceConfig, EvalLogsDataSourceConfig, EvalStoredCompletionsDataSourceConfig], + Union[EvalCustomDataSourceConfig, DataSourceConfigLogs, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/eval_update_response.py b/src/openai/types/eval_update_response.py index 73ee6eb58c..3f0b90ae03 100644 --- a/src/openai/types/eval_update_response.py +++ b/src/openai/types/eval_update_response.py @@ -1,8 +1,10 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import List, Union, Optional +from typing import Dict, List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias +from pydantic import Field as FieldInfo + from .._utils import PropertyInfo from .._models import BaseModel from .shared.metadata import Metadata @@ -10,7 +12,6 @@ from .graders.label_model_grader import LabelModelGrader from .graders.score_model_grader import ScoreModelGrader from .graders.string_check_grader import StringCheckGrader -from .eval_logs_data_source_config import EvalLogsDataSourceConfig from .eval_custom_data_source_config import EvalCustomDataSourceConfig from .graders.text_similarity_grader import TextSimilarityGrader from .eval_stored_completions_data_source_config import EvalStoredCompletionsDataSourceConfig @@ -18,14 +19,37 @@ __all__ = [ "EvalUpdateResponse", "DataSourceConfig", + "DataSourceConfigLogs", "TestingCriterion", "TestingCriterionEvalGraderTextSimilarity", "TestingCriterionEvalGraderPython", "TestingCriterionEvalGraderScoreModel", ] + +class DataSourceConfigLogs(BaseModel): + schema_: Dict[str, object] = FieldInfo(alias="schema") + """ + The json schema for the run data source items. Learn how to build JSON schemas + [here](https://json-schema.org/). + """ + + type: Literal["logs"] + """The type of data source. Always `logs`.""" + + metadata: Optional[Metadata] = None + """Set of 16 key-value pairs that can be attached to an object. + + This can be useful for storing additional information about the object in a + structured format, and querying for objects via API or the dashboard. + + Keys are strings with a maximum length of 64 characters. Values are strings with + a maximum length of 512 characters. + """ + + DataSourceConfig: TypeAlias = Annotated[ - Union[EvalCustomDataSourceConfig, EvalLogsDataSourceConfig, EvalStoredCompletionsDataSourceConfig], + Union[EvalCustomDataSourceConfig, DataSourceConfigLogs, EvalStoredCompletionsDataSourceConfig], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/evals/__init__.py b/src/openai/types/evals/__init__.py index 7841a40382..ebf84c6b8d 100644 --- a/src/openai/types/evals/__init__.py +++ b/src/openai/types/evals/__init__.py @@ -10,21 +10,13 @@ from .run_create_response import RunCreateResponse as RunCreateResponse from .run_delete_response import RunDeleteResponse as RunDeleteResponse from .run_retrieve_response import RunRetrieveResponse as RunRetrieveResponse -from .eval_jsonl_file_id_source import EvalJSONLFileIDSource as EvalJSONLFileIDSource -from .eval_jsonl_file_content_source import EvalJSONLFileContentSource as EvalJSONLFileContentSource -from .eval_jsonl_file_id_source_param import EvalJSONLFileIDSourceParam as EvalJSONLFileIDSourceParam from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource as CreateEvalJSONLRunDataSource -from .eval_jsonl_file_content_source_param import EvalJSONLFileContentSourceParam as EvalJSONLFileContentSourceParam -from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource as CreateEvalResponsesRunDataSource from .create_eval_completions_run_data_source import ( CreateEvalCompletionsRunDataSource as CreateEvalCompletionsRunDataSource, ) from .create_eval_jsonl_run_data_source_param import ( CreateEvalJSONLRunDataSourceParam as CreateEvalJSONLRunDataSourceParam, ) -from .create_eval_responses_run_data_source_param import ( - CreateEvalResponsesRunDataSourceParam as CreateEvalResponsesRunDataSourceParam, -) from .create_eval_completions_run_data_source_param import ( 
CreateEvalCompletionsRunDataSourceParam as CreateEvalCompletionsRunDataSourceParam, ) diff --git a/src/openai/types/evals/create_eval_completions_run_data_source.py b/src/openai/types/evals/create_eval_completions_run_data_source.py index 439fcc5d7b..29c687b542 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source.py @@ -1,28 +1,54 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Union, Optional +from typing import Dict, List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from ..._utils import PropertyInfo from ..._models import BaseModel from ..shared.metadata import Metadata -from ..shared.eval_item import EvalItem -from .eval_jsonl_file_id_source import EvalJSONLFileIDSource from ..responses.easy_input_message import EasyInputMessage -from .eval_jsonl_file_content_source import EvalJSONLFileContentSource +from ..responses.response_input_text import ResponseInputText __all__ = [ "CreateEvalCompletionsRunDataSource", "Source", + "SourceFileContent", + "SourceFileContentContent", + "SourceFileID", "SourceStoredCompletions", "InputMessages", "InputMessagesTemplate", "InputMessagesTemplateTemplate", + "InputMessagesTemplateTemplateMessage", + "InputMessagesTemplateTemplateMessageContent", + "InputMessagesTemplateTemplateMessageContentOutputText", "InputMessagesItemReference", "SamplingParams", ] +class SourceFileContentContent(BaseModel): + item: Dict[str, object] + + sample: Optional[Dict[str, object]] = None + + +class SourceFileContent(BaseModel): + content: List[SourceFileContentContent] + """The content of the jsonl file.""" + + type: Literal["file_content"] + """The type of jsonl source. Always `file_content`.""" + + +class SourceFileID(BaseModel): + id: str + """The identifier of the file.""" + + type: Literal["file_id"] + """The type of jsonl source. Always `file_id`.""" + + class SourceStoredCompletions(BaseModel): type: Literal["stored_completions"] """The type of source. Always `stored_completions`.""" @@ -51,12 +77,39 @@ class SourceStoredCompletions(BaseModel): Source: TypeAlias = Annotated[ - Union[EvalJSONLFileContentSource, EvalJSONLFileIDSource, SourceStoredCompletions], - PropertyInfo(discriminator="type"), + Union[SourceFileContent, SourceFileID, SourceStoredCompletions], PropertyInfo(discriminator="type") +] + + +class InputMessagesTemplateTemplateMessageContentOutputText(BaseModel): + text: str + """The text output from the model.""" + + type: Literal["output_text"] + """The type of the output text. Always `output_text`.""" + + +InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[ + str, ResponseInputText, InputMessagesTemplateTemplateMessageContentOutputText ] + +class InputMessagesTemplateTemplateMessage(BaseModel): + content: InputMessagesTemplateTemplateMessageContent + """Text inputs to the model - can contain template strings.""" + + role: Literal["user", "assistant", "system", "developer"] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. + """ + + type: Optional[Literal["message"]] = None + """The type of the message input. 
Always `message`.""" + + InputMessagesTemplateTemplate: TypeAlias = Annotated[ - Union[EasyInputMessage, EvalItem], PropertyInfo(discriminator="type") + Union[EasyInputMessage, InputMessagesTemplateTemplateMessage], PropertyInfo(discriminator="type") ] diff --git a/src/openai/types/evals/create_eval_completions_run_data_source_param.py b/src/openai/types/evals/create_eval_completions_run_data_source_param.py index e94443d953..c53064ee27 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source_param.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source_param.py @@ -2,27 +2,53 @@ from __future__ import annotations -from typing import Union, Iterable, Optional +from typing import Dict, Union, Iterable, Optional from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..shared_params.metadata import Metadata -from ..shared_params.eval_item import EvalItem -from .eval_jsonl_file_id_source_param import EvalJSONLFileIDSourceParam from ..responses.easy_input_message_param import EasyInputMessageParam -from .eval_jsonl_file_content_source_param import EvalJSONLFileContentSourceParam +from ..responses.response_input_text_param import ResponseInputTextParam __all__ = [ "CreateEvalCompletionsRunDataSourceParam", "Source", + "SourceFileContent", + "SourceFileContentContent", + "SourceFileID", "SourceStoredCompletions", "InputMessages", "InputMessagesTemplate", "InputMessagesTemplateTemplate", + "InputMessagesTemplateTemplateMessage", + "InputMessagesTemplateTemplateMessageContent", + "InputMessagesTemplateTemplateMessageContentOutputText", "InputMessagesItemReference", "SamplingParams", ] +class SourceFileContentContent(TypedDict, total=False): + item: Required[Dict[str, object]] + + sample: Dict[str, object] + + +class SourceFileContent(TypedDict, total=False): + content: Required[Iterable[SourceFileContentContent]] + """The content of the jsonl file.""" + + type: Required[Literal["file_content"]] + """The type of jsonl source. Always `file_content`.""" + + +class SourceFileID(TypedDict, total=False): + id: Required[str] + """The identifier of the file.""" + + type: Required[Literal["file_id"]] + """The type of jsonl source. Always `file_id`.""" + + class SourceStoredCompletions(TypedDict, total=False): type: Required[Literal["stored_completions"]] """The type of source. Always `stored_completions`.""" @@ -50,9 +76,37 @@ class SourceStoredCompletions(TypedDict, total=False): """An optional model to filter by (e.g., 'gpt-4o').""" -Source: TypeAlias = Union[EvalJSONLFileContentSourceParam, EvalJSONLFileIDSourceParam, SourceStoredCompletions] +Source: TypeAlias = Union[SourceFileContent, SourceFileID, SourceStoredCompletions] + + +class InputMessagesTemplateTemplateMessageContentOutputText(TypedDict, total=False): + text: Required[str] + """The text output from the model.""" + + type: Required[Literal["output_text"]] + """The type of the output text. Always `output_text`.""" + + +InputMessagesTemplateTemplateMessageContent: TypeAlias = Union[ + str, ResponseInputTextParam, InputMessagesTemplateTemplateMessageContentOutputText +] + + +class InputMessagesTemplateTemplateMessage(TypedDict, total=False): + content: Required[InputMessagesTemplateTemplateMessageContent] + """Text inputs to the model - can contain template strings.""" + + role: Required[Literal["user", "assistant", "system", "developer"]] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. 
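Since the template-message types are now inlined rather than shared `EvalItem`s, a completions run data source remains plain nested dicts at the call site. A sketch under assumed example values (the model name and item fields are placeholders):

data_source = {
    "type": "completions",
    "model": "gpt-4o-mini",  # placeholder model
    "source": {
        "type": "file_content",
        "content": [{"item": {"question": "What is 2 + 2?", "expected": "4"}}],
    },
    "input_messages": {
        "type": "template",
        "template": [
            {"type": "message", "role": "developer", "content": "Answer tersely."},
            # {{item.question}} is resolved from each source item.
            {"type": "message", "role": "user", "content": "{{item.question}}"},
        ],
    },
}
# Passed as-is, e.g. client.evals.runs.create(eval_id=..., name=..., data_source=data_source)
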
+ """ + + type: Literal["message"] + """The type of the message input. Always `message`.""" + -InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, EvalItem] +InputMessagesTemplateTemplate: TypeAlias = Union[EasyInputMessageParam, InputMessagesTemplateTemplateMessage] class InputMessagesTemplate(TypedDict, total=False): diff --git a/src/openai/types/evals/create_eval_jsonl_run_data_source.py b/src/openai/types/evals/create_eval_jsonl_run_data_source.py index 03c6550744..d2be56243b 100644 --- a/src/openai/types/evals/create_eval_jsonl_run_data_source.py +++ b/src/openai/types/evals/create_eval_jsonl_run_data_source.py @@ -1,18 +1,37 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import Union +from typing import Dict, List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from ..._utils import PropertyInfo from ..._models import BaseModel -from .eval_jsonl_file_id_source import EvalJSONLFileIDSource -from .eval_jsonl_file_content_source import EvalJSONLFileContentSource -__all__ = ["CreateEvalJSONLRunDataSource", "Source"] +__all__ = ["CreateEvalJSONLRunDataSource", "Source", "SourceFileContent", "SourceFileContentContent", "SourceFileID"] -Source: TypeAlias = Annotated[ - Union[EvalJSONLFileContentSource, EvalJSONLFileIDSource], PropertyInfo(discriminator="type") -] + +class SourceFileContentContent(BaseModel): + item: Dict[str, object] + + sample: Optional[Dict[str, object]] = None + + +class SourceFileContent(BaseModel): + content: List[SourceFileContentContent] + """The content of the jsonl file.""" + + type: Literal["file_content"] + """The type of jsonl source. Always `file_content`.""" + + +class SourceFileID(BaseModel): + id: str + """The identifier of the file.""" + + type: Literal["file_id"] + """The type of jsonl source. Always `file_id`.""" + + +Source: TypeAlias = Annotated[Union[SourceFileContent, SourceFileID], PropertyInfo(discriminator="type")] class CreateEvalJSONLRunDataSource(BaseModel): diff --git a/src/openai/types/evals/create_eval_jsonl_run_data_source_param.py b/src/openai/types/evals/create_eval_jsonl_run_data_source_param.py index cc71925782..b8ba48a666 100644 --- a/src/openai/types/evals/create_eval_jsonl_run_data_source_param.py +++ b/src/openai/types/evals/create_eval_jsonl_run_data_source_param.py @@ -2,15 +2,41 @@ from __future__ import annotations -from typing import Union +from typing import Dict, Union, Iterable from typing_extensions import Literal, Required, TypeAlias, TypedDict -from .eval_jsonl_file_id_source_param import EvalJSONLFileIDSourceParam -from .eval_jsonl_file_content_source_param import EvalJSONLFileContentSourceParam +__all__ = [ + "CreateEvalJSONLRunDataSourceParam", + "Source", + "SourceFileContent", + "SourceFileContentContent", + "SourceFileID", +] -__all__ = ["CreateEvalJSONLRunDataSourceParam", "Source"] -Source: TypeAlias = Union[EvalJSONLFileContentSourceParam, EvalJSONLFileIDSourceParam] +class SourceFileContentContent(TypedDict, total=False): + item: Required[Dict[str, object]] + + sample: Dict[str, object] + + +class SourceFileContent(TypedDict, total=False): + content: Required[Iterable[SourceFileContentContent]] + """The content of the jsonl file.""" + + type: Required[Literal["file_content"]] + """The type of jsonl source. Always `file_content`.""" + + +class SourceFileID(TypedDict, total=False): + id: Required[str] + """The identifier of the file.""" + + type: Required[Literal["file_id"]] + """The type of jsonl source. 
Always `file_id`.""" + + +Source: TypeAlias = Union[SourceFileContent, SourceFileID] class CreateEvalJSONLRunDataSourceParam(TypedDict, total=False): diff --git a/src/openai/types/evals/create_eval_responses_run_data_source.py b/src/openai/types/evals/create_eval_responses_run_data_source.py deleted file mode 100644 index 268eab2173..0000000000 --- a/src/openai/types/evals/create_eval_responses_run_data_source.py +++ /dev/null @@ -1,151 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import List, Union, Optional -from typing_extensions import Literal, Annotated, TypeAlias - -from ..._utils import PropertyInfo -from ..._models import BaseModel -from ..shared.eval_item import EvalItem -from ..shared.reasoning_effort import ReasoningEffort -from .eval_jsonl_file_id_source import EvalJSONLFileIDSource -from .eval_jsonl_file_content_source import EvalJSONLFileContentSource - -__all__ = [ - "CreateEvalResponsesRunDataSource", - "Source", - "SourceResponses", - "InputMessages", - "InputMessagesTemplate", - "InputMessagesTemplateTemplate", - "InputMessagesTemplateTemplateChatMessage", - "InputMessagesItemReference", - "SamplingParams", -] - - -class SourceResponses(BaseModel): - type: Literal["responses"] - """The type of run data source. Always `responses`.""" - - created_after: Optional[int] = None - """Only include items created after this timestamp (inclusive). - - This is a query parameter used to select responses. - """ - - created_before: Optional[int] = None - """Only include items created before this timestamp (inclusive). - - This is a query parameter used to select responses. - """ - - has_tool_calls: Optional[bool] = None - """Whether the response has tool calls. - - This is a query parameter used to select responses. - """ - - instructions_search: Optional[str] = None - """Optional string to search the 'instructions' field. - - This is a query parameter used to select responses. - """ - - metadata: Optional[object] = None - """Metadata filter for the responses. - - This is a query parameter used to select responses. - """ - - model: Optional[str] = None - """The name of the model to find responses for. - - This is a query parameter used to select responses. - """ - - reasoning_effort: Optional[ReasoningEffort] = None - """Optional reasoning effort parameter. - - This is a query parameter used to select responses. - """ - - temperature: Optional[float] = None - """Sampling temperature. This is a query parameter used to select responses.""" - - tools: Optional[List[str]] = None - """List of tool names. This is a query parameter used to select responses.""" - - top_p: Optional[float] = None - """Nucleus sampling parameter. This is a query parameter used to select responses.""" - - users: Optional[List[str]] = None - """List of user identifiers. This is a query parameter used to select responses.""" - - -Source: TypeAlias = Annotated[ - Union[EvalJSONLFileContentSource, EvalJSONLFileIDSource, SourceResponses], PropertyInfo(discriminator="type") -] - - -class InputMessagesTemplateTemplateChatMessage(BaseModel): - content: str - """The content of the message.""" - - role: str - """The role of the message (e.g. "system", "assistant", "user").""" - - -InputMessagesTemplateTemplate: TypeAlias = Union[InputMessagesTemplateTemplateChatMessage, EvalItem] - - -class InputMessagesTemplate(BaseModel): - template: List[InputMessagesTemplateTemplate] - """A list of chat messages forming the prompt or context. 
- - May include variable references to the "item" namespace, ie {{item.name}}. - """ - - type: Literal["template"] - """The type of input messages. Always `template`.""" - - -class InputMessagesItemReference(BaseModel): - item_reference: str - """A reference to a variable in the "item" namespace. Ie, "item.name" """ - - type: Literal["item_reference"] - """The type of input messages. Always `item_reference`.""" - - -InputMessages: TypeAlias = Annotated[ - Union[InputMessagesTemplate, InputMessagesItemReference], PropertyInfo(discriminator="type") -] - - -class SamplingParams(BaseModel): - max_completion_tokens: Optional[int] = None - """The maximum number of tokens in the generated output.""" - - seed: Optional[int] = None - """A seed value to initialize the randomness, during sampling.""" - - temperature: Optional[float] = None - """A higher temperature increases randomness in the outputs.""" - - top_p: Optional[float] = None - """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" - - -class CreateEvalResponsesRunDataSource(BaseModel): - source: Source - """A EvalResponsesSource object describing a run data source configuration.""" - - type: Literal["responses"] - """The type of run data source. Always `responses`.""" - - input_messages: Optional[InputMessages] = None - - model: Optional[str] = None - """The name of the model to use for generating completions (e.g. "o3-mini").""" - - sampling_params: Optional[SamplingParams] = None diff --git a/src/openai/types/evals/create_eval_responses_run_data_source_param.py b/src/openai/types/evals/create_eval_responses_run_data_source_param.py deleted file mode 100644 index 02d45a9e13..0000000000 --- a/src/openai/types/evals/create_eval_responses_run_data_source_param.py +++ /dev/null @@ -1,147 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import List, Union, Iterable, Optional -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from ..shared.reasoning_effort import ReasoningEffort -from ..shared_params.eval_item import EvalItem -from .eval_jsonl_file_id_source_param import EvalJSONLFileIDSourceParam -from .eval_jsonl_file_content_source_param import EvalJSONLFileContentSourceParam - -__all__ = [ - "CreateEvalResponsesRunDataSourceParam", - "Source", - "SourceResponses", - "InputMessages", - "InputMessagesTemplate", - "InputMessagesTemplateTemplate", - "InputMessagesTemplateTemplateChatMessage", - "InputMessagesItemReference", - "SamplingParams", -] - - -class SourceResponses(TypedDict, total=False): - type: Required[Literal["responses"]] - """The type of run data source. Always `responses`.""" - - created_after: Optional[int] - """Only include items created after this timestamp (inclusive). - - This is a query parameter used to select responses. - """ - - created_before: Optional[int] - """Only include items created before this timestamp (inclusive). - - This is a query parameter used to select responses. - """ - - has_tool_calls: Optional[bool] - """Whether the response has tool calls. - - This is a query parameter used to select responses. - """ - - instructions_search: Optional[str] - """Optional string to search the 'instructions' field. - - This is a query parameter used to select responses. - """ - - metadata: Optional[object] - """Metadata filter for the responses. - - This is a query parameter used to select responses. 
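These deleted responses-source modules are re-inlined field-for-field into `run_create_params.py` and the run response models later in this patch, so a responses-backed run is still created the same way. A hedged call-site sketch (eval ID and filter values are placeholders):

from openai import OpenAI

client = OpenAI()

run = client.evals.runs.create(
    eval_id="eval_abc123",  # placeholder eval ID
    name="responses-run",
    data_source={
        "type": "responses",
        "source": {
            "type": "responses",
            "model": "gpt-4o",        # only sample responses from this model
            "has_tool_calls": False,  # skip tool-calling traffic
        },
        # Reference each selected item directly instead of templating messages.
        "input_messages": {"type": "item_reference", "item_reference": "item.input"},
    },
)
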
- """ - - model: Optional[str] - """The name of the model to find responses for. - - This is a query parameter used to select responses. - """ - - reasoning_effort: Optional[ReasoningEffort] - """Optional reasoning effort parameter. - - This is a query parameter used to select responses. - """ - - temperature: Optional[float] - """Sampling temperature. This is a query parameter used to select responses.""" - - tools: Optional[List[str]] - """List of tool names. This is a query parameter used to select responses.""" - - top_p: Optional[float] - """Nucleus sampling parameter. This is a query parameter used to select responses.""" - - users: Optional[List[str]] - """List of user identifiers. This is a query parameter used to select responses.""" - - -Source: TypeAlias = Union[EvalJSONLFileContentSourceParam, EvalJSONLFileIDSourceParam, SourceResponses] - - -class InputMessagesTemplateTemplateChatMessage(TypedDict, total=False): - content: Required[str] - """The content of the message.""" - - role: Required[str] - """The role of the message (e.g. "system", "assistant", "user").""" - - -InputMessagesTemplateTemplate: TypeAlias = Union[InputMessagesTemplateTemplateChatMessage, EvalItem] - - -class InputMessagesTemplate(TypedDict, total=False): - template: Required[Iterable[InputMessagesTemplateTemplate]] - """A list of chat messages forming the prompt or context. - - May include variable references to the "item" namespace, ie {{item.name}}. - """ - - type: Required[Literal["template"]] - """The type of input messages. Always `template`.""" - - -class InputMessagesItemReference(TypedDict, total=False): - item_reference: Required[str] - """A reference to a variable in the "item" namespace. Ie, "item.name" """ - - type: Required[Literal["item_reference"]] - """The type of input messages. Always `item_reference`.""" - - -InputMessages: TypeAlias = Union[InputMessagesTemplate, InputMessagesItemReference] - - -class SamplingParams(TypedDict, total=False): - max_completion_tokens: int - """The maximum number of tokens in the generated output.""" - - seed: int - """A seed value to initialize the randomness, during sampling.""" - - temperature: float - """A higher temperature increases randomness in the outputs.""" - - top_p: float - """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" - - -class CreateEvalResponsesRunDataSourceParam(TypedDict, total=False): - source: Required[Source] - """A EvalResponsesSource object describing a run data source configuration.""" - - type: Required[Literal["responses"]] - """The type of run data source. Always `responses`.""" - - input_messages: InputMessages - - model: str - """The name of the model to use for generating completions (e.g. "o3-mini").""" - - sampling_params: SamplingParams diff --git a/src/openai/types/evals/eval_jsonl_file_content_source.py b/src/openai/types/evals/eval_jsonl_file_content_source.py deleted file mode 100644 index b18fe8937b..0000000000 --- a/src/openai/types/evals/eval_jsonl_file_content_source.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
- -from typing import Dict, List, Optional -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["EvalJSONLFileContentSource", "Content"] - - -class Content(BaseModel): - item: Dict[str, object] - - sample: Optional[Dict[str, object]] = None - - -class EvalJSONLFileContentSource(BaseModel): - content: List[Content] - """The content of the jsonl file.""" - - type: Literal["file_content"] - """The type of jsonl source. Always `file_content`.""" diff --git a/src/openai/types/evals/eval_jsonl_file_content_source_param.py b/src/openai/types/evals/eval_jsonl_file_content_source_param.py deleted file mode 100644 index a70f688762..0000000000 --- a/src/openai/types/evals/eval_jsonl_file_content_source_param.py +++ /dev/null @@ -1,22 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Dict, Iterable -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["EvalJSONLFileContentSourceParam", "Content"] - - -class Content(TypedDict, total=False): - item: Required[Dict[str, object]] - - sample: Dict[str, object] - - -class EvalJSONLFileContentSourceParam(TypedDict, total=False): - content: Required[Iterable[Content]] - """The content of the jsonl file.""" - - type: Required[Literal["file_content"]] - """The type of jsonl source. Always `file_content`.""" diff --git a/src/openai/types/evals/eval_jsonl_file_id_source.py b/src/openai/types/evals/eval_jsonl_file_id_source.py deleted file mode 100644 index 2d317f2ce1..0000000000 --- a/src/openai/types/evals/eval_jsonl_file_id_source.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from ..._models import BaseModel - -__all__ = ["EvalJSONLFileIDSource"] - - -class EvalJSONLFileIDSource(BaseModel): - id: str - """The identifier of the file.""" - - type: Literal["file_id"] - """The type of jsonl source. Always `file_id`.""" diff --git a/src/openai/types/evals/eval_jsonl_file_id_source_param.py b/src/openai/types/evals/eval_jsonl_file_id_source_param.py deleted file mode 100644 index 76b8662cd6..0000000000 --- a/src/openai/types/evals/eval_jsonl_file_id_source_param.py +++ /dev/null @@ -1,15 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["EvalJSONLFileIDSourceParam"] - - -class EvalJSONLFileIDSourceParam(TypedDict, total=False): - id: Required[str] - """The identifier of the file.""" - - type: Required[Literal["file_id"]] - """The type of jsonl source. Always `file_id`.""" diff --git a/src/openai/types/evals/run_cancel_response.py b/src/openai/types/evals/run_cancel_response.py index a49989b60f..318e7abc35 100644 --- a/src/openai/types/evals/run_cancel_response.py +++ b/src/openai/types/evals/run_cancel_response.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import List, Union, Optional +from typing import Dict, List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from pydantic import Field as FieldInfo @@ -9,14 +9,219 @@ from ..._models import BaseModel from .eval_api_error import EvalAPIError from ..shared.metadata import Metadata +from ..shared.reasoning_effort import ReasoningEffort +from ..responses.response_input_text import ResponseInputText from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource -from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource -__all__ = ["RunCancelResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"] +__all__ = [ + "RunCancelResponse", + "DataSource", + "DataSourceResponses", + "DataSourceResponsesSource", + "DataSourceResponsesSourceFileContent", + "DataSourceResponsesSourceFileContentContent", + "DataSourceResponsesSourceFileID", + "DataSourceResponsesSourceResponses", + "DataSourceResponsesInputMessages", + "DataSourceResponsesInputMessagesTemplate", + "DataSourceResponsesInputMessagesTemplateTemplate", + "DataSourceResponsesInputMessagesTemplateTemplateChatMessage", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItem", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceResponsesInputMessagesItemReference", + "DataSourceResponsesSamplingParams", + "PerModelUsage", + "PerTestingCriteriaResult", + "ResultCounts", +] + + +class DataSourceResponsesSourceFileContentContent(BaseModel): + item: Dict[str, object] + + sample: Optional[Dict[str, object]] = None + + +class DataSourceResponsesSourceFileContent(BaseModel): + content: List[DataSourceResponsesSourceFileContentContent] + """The content of the jsonl file.""" + + type: Literal["file_content"] + """The type of jsonl source. Always `file_content`.""" + + +class DataSourceResponsesSourceFileID(BaseModel): + id: str + """The identifier of the file.""" + + type: Literal["file_id"] + """The type of jsonl source. Always `file_id`.""" + + +class DataSourceResponsesSourceResponses(BaseModel): + type: Literal["responses"] + """The type of run data source. Always `responses`.""" + + created_after: Optional[int] = None + """Only include items created after this timestamp (inclusive). + + This is a query parameter used to select responses. + """ + + created_before: Optional[int] = None + """Only include items created before this timestamp (inclusive). + + This is a query parameter used to select responses. + """ + + has_tool_calls: Optional[bool] = None + """Whether the response has tool calls. + + This is a query parameter used to select responses. + """ + + instructions_search: Optional[str] = None + """Optional string to search the 'instructions' field. + + This is a query parameter used to select responses. + """ + + metadata: Optional[object] = None + """Metadata filter for the responses. + + This is a query parameter used to select responses. + """ + + model: Optional[str] = None + """The name of the model to find responses for. + + This is a query parameter used to select responses. + """ + + reasoning_effort: Optional[ReasoningEffort] = None + """Optional reasoning effort parameter. + + This is a query parameter used to select responses. + """ + + temperature: Optional[float] = None + """Sampling temperature. 
This is a query parameter used to select responses.""" + + tools: Optional[List[str]] = None + """List of tool names. This is a query parameter used to select responses.""" + + top_p: Optional[float] = None + """Nucleus sampling parameter. This is a query parameter used to select responses.""" + + users: Optional[List[str]] = None + """List of user identifiers. This is a query parameter used to select responses.""" + + +DataSourceResponsesSource: TypeAlias = Annotated[ + Union[DataSourceResponsesSourceFileContent, DataSourceResponsesSourceFileID, DataSourceResponsesSourceResponses], + PropertyInfo(discriminator="type"), +] + + +class DataSourceResponsesInputMessagesTemplateTemplateChatMessage(BaseModel): + content: str + """The content of the message.""" + + role: str + """The role of the message (e.g. "system", "assistant", "user").""" + + +class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): + text: str + """The text output from the model.""" + + type: Literal["output_text"] + """The type of the output text. Always `output_text`.""" + + +DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ + str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText +] + + +class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): + content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent + """Text inputs to the model - can contain template strings.""" + + role: Literal["user", "assistant", "system", "developer"] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. + """ + + type: Optional[Literal["message"]] = None + """The type of the message input. Always `message`.""" + + +DataSourceResponsesInputMessagesTemplateTemplate: TypeAlias = Union[ + DataSourceResponsesInputMessagesTemplateTemplateChatMessage, + DataSourceResponsesInputMessagesTemplateTemplateEvalItem, +] + + +class DataSourceResponsesInputMessagesTemplate(BaseModel): + template: List[DataSourceResponsesInputMessagesTemplateTemplate] + """A list of chat messages forming the prompt or context. + + May include variable references to the "item" namespace, ie {{item.name}}. + """ + + type: Literal["template"] + """The type of input messages. Always `template`.""" + + +class DataSourceResponsesInputMessagesItemReference(BaseModel): + item_reference: str + """A reference to a variable in the "item" namespace. Ie, "item.name" """ + + type: Literal["item_reference"] + """The type of input messages. Always `item_reference`.""" + + +DataSourceResponsesInputMessages: TypeAlias = Annotated[ + Union[DataSourceResponsesInputMessagesTemplate, DataSourceResponsesInputMessagesItemReference], + PropertyInfo(discriminator="type"), +] + + +class DataSourceResponsesSamplingParams(BaseModel): + max_completion_tokens: Optional[int] = None + """The maximum number of tokens in the generated output.""" + + seed: Optional[int] = None + """A seed value to initialize the randomness, during sampling.""" + + temperature: Optional[float] = None + """A higher temperature increases randomness in the outputs.""" + + top_p: Optional[float] = None + """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" + + +class DataSourceResponses(BaseModel): + source: DataSourceResponsesSource + """A EvalResponsesSource object describing a run data source configuration.""" + + type: Literal["responses"] + """The type of run data source. 
Always `responses`.""" + + input_messages: Optional[DataSourceResponsesInputMessages] = None + + model: Optional[str] = None + """The name of the model to use for generating completions (e.g. "o3-mini").""" + + sampling_params: Optional[DataSourceResponsesSamplingParams] = None + DataSource: TypeAlias = Annotated[ - Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, CreateEvalResponsesRunDataSource], + Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceResponses], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/evals/run_create_params.py b/src/openai/types/evals/run_create_params.py index 00c7398748..e030224dcb 100644 --- a/src/openai/types/evals/run_create_params.py +++ b/src/openai/types/evals/run_create_params.py @@ -2,15 +2,34 @@ from __future__ import annotations -from typing import Union, Optional -from typing_extensions import Required, TypeAlias, TypedDict +from typing import Dict, List, Union, Iterable, Optional +from typing_extensions import Literal, Required, TypeAlias, TypedDict from ..shared_params.metadata import Metadata +from ..shared.reasoning_effort import ReasoningEffort +from ..responses.response_input_text_param import ResponseInputTextParam from .create_eval_jsonl_run_data_source_param import CreateEvalJSONLRunDataSourceParam -from .create_eval_responses_run_data_source_param import CreateEvalResponsesRunDataSourceParam from .create_eval_completions_run_data_source_param import CreateEvalCompletionsRunDataSourceParam -__all__ = ["RunCreateParams", "DataSource"] +__all__ = [ + "RunCreateParams", + "DataSource", + "DataSourceCreateEvalResponsesRunDataSource", + "DataSourceCreateEvalResponsesRunDataSourceSource", + "DataSourceCreateEvalResponsesRunDataSourceSourceFileContent", + "DataSourceCreateEvalResponsesRunDataSourceSourceFileContentContent", + "DataSourceCreateEvalResponsesRunDataSourceSourceFileID", + "DataSourceCreateEvalResponsesRunDataSourceSourceResponses", + "DataSourceCreateEvalResponsesRunDataSourceInputMessages", + "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate", + "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplate", + "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateChatMessage", + "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem", + "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent", + "DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference", + "DataSourceCreateEvalResponsesRunDataSourceSamplingParams", +] class RunCreateParams(TypedDict, total=False): @@ -31,6 +50,195 @@ class RunCreateParams(TypedDict, total=False): """The name of the run.""" +class DataSourceCreateEvalResponsesRunDataSourceSourceFileContentContent(TypedDict, total=False): + item: Required[Dict[str, object]] + + sample: Dict[str, object] + + +class DataSourceCreateEvalResponsesRunDataSourceSourceFileContent(TypedDict, total=False): + content: Required[Iterable[DataSourceCreateEvalResponsesRunDataSourceSourceFileContentContent]] + """The content of the jsonl file.""" + + type: Required[Literal["file_content"]] + """The type of jsonl source. 
Always `file_content`.""" + + +class DataSourceCreateEvalResponsesRunDataSourceSourceFileID(TypedDict, total=False): + id: Required[str] + """The identifier of the file.""" + + type: Required[Literal["file_id"]] + """The type of jsonl source. Always `file_id`.""" + + +class DataSourceCreateEvalResponsesRunDataSourceSourceResponses(TypedDict, total=False): + type: Required[Literal["responses"]] + """The type of run data source. Always `responses`.""" + + created_after: Optional[int] + """Only include items created after this timestamp (inclusive). + + This is a query parameter used to select responses. + """ + + created_before: Optional[int] + """Only include items created before this timestamp (inclusive). + + This is a query parameter used to select responses. + """ + + has_tool_calls: Optional[bool] + """Whether the response has tool calls. + + This is a query parameter used to select responses. + """ + + instructions_search: Optional[str] + """Optional string to search the 'instructions' field. + + This is a query parameter used to select responses. + """ + + metadata: Optional[object] + """Metadata filter for the responses. + + This is a query parameter used to select responses. + """ + + model: Optional[str] + """The name of the model to find responses for. + + This is a query parameter used to select responses. + """ + + reasoning_effort: Optional[ReasoningEffort] + """Optional reasoning effort parameter. + + This is a query parameter used to select responses. + """ + + temperature: Optional[float] + """Sampling temperature. This is a query parameter used to select responses.""" + + tools: Optional[List[str]] + """List of tool names. This is a query parameter used to select responses.""" + + top_p: Optional[float] + """Nucleus sampling parameter. This is a query parameter used to select responses.""" + + users: Optional[List[str]] + """List of user identifiers. This is a query parameter used to select responses.""" + + +DataSourceCreateEvalResponsesRunDataSourceSource: TypeAlias = Union[ + DataSourceCreateEvalResponsesRunDataSourceSourceFileContent, + DataSourceCreateEvalResponsesRunDataSourceSourceFileID, + DataSourceCreateEvalResponsesRunDataSourceSourceResponses, +] + + +class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateChatMessage(TypedDict, total=False): + content: Required[str] + """The content of the message.""" + + role: Required[str] + """The role of the message (e.g. "system", "assistant", "user").""" + + +class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText( + TypedDict, total=False +): + text: Required[str] + """The text output from the model.""" + + type: Required[Literal["output_text"]] + """The type of the output text. Always `output_text`.""" + + +DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ + str, + ResponseInputTextParam, + DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText, +] + + +class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem(TypedDict, total=False): + content: Required[DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContent] + """Text inputs to the model - can contain template strings.""" + + role: Required[Literal["user", "assistant", "system", "developer"]] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. 
+ """ + + type: Literal["message"] + """The type of the message input. Always `message`.""" + + +DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplate: TypeAlias = Union[ + DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateChatMessage, + DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItem, +] + + +class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate(TypedDict, total=False): + template: Required[Iterable[DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplate]] + """A list of chat messages forming the prompt or context. + + May include variable references to the "item" namespace, ie {{item.name}}. + """ + + type: Required[Literal["template"]] + """The type of input messages. Always `template`.""" + + +class DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference(TypedDict, total=False): + item_reference: Required[str] + """A reference to a variable in the "item" namespace. Ie, "item.name" """ + + type: Required[Literal["item_reference"]] + """The type of input messages. Always `item_reference`.""" + + +DataSourceCreateEvalResponsesRunDataSourceInputMessages: TypeAlias = Union[ + DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate, + DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference, +] + + +class DataSourceCreateEvalResponsesRunDataSourceSamplingParams(TypedDict, total=False): + max_completion_tokens: int + """The maximum number of tokens in the generated output.""" + + seed: int + """A seed value to initialize the randomness, during sampling.""" + + temperature: float + """A higher temperature increases randomness in the outputs.""" + + top_p: float + """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" + + +class DataSourceCreateEvalResponsesRunDataSource(TypedDict, total=False): + source: Required[DataSourceCreateEvalResponsesRunDataSourceSource] + """A EvalResponsesSource object describing a run data source configuration.""" + + type: Required[Literal["responses"]] + """The type of run data source. Always `responses`.""" + + input_messages: DataSourceCreateEvalResponsesRunDataSourceInputMessages + + model: str + """The name of the model to use for generating completions (e.g. "o3-mini").""" + + sampling_params: DataSourceCreateEvalResponsesRunDataSourceSamplingParams + + DataSource: TypeAlias = Union[ - CreateEvalJSONLRunDataSourceParam, CreateEvalCompletionsRunDataSourceParam, CreateEvalResponsesRunDataSourceParam + CreateEvalJSONLRunDataSourceParam, + CreateEvalCompletionsRunDataSourceParam, + DataSourceCreateEvalResponsesRunDataSource, ] diff --git a/src/openai/types/evals/run_create_response.py b/src/openai/types/evals/run_create_response.py index 8dc64cf895..902e45c9bc 100644 --- a/src/openai/types/evals/run_create_response.py +++ b/src/openai/types/evals/run_create_response.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import List, Union, Optional +from typing import Dict, List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from pydantic import Field as FieldInfo @@ -9,14 +9,219 @@ from ..._models import BaseModel from .eval_api_error import EvalAPIError from ..shared.metadata import Metadata +from ..shared.reasoning_effort import ReasoningEffort +from ..responses.response_input_text import ResponseInputText from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource -from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource -__all__ = ["RunCreateResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"] +__all__ = [ + "RunCreateResponse", + "DataSource", + "DataSourceResponses", + "DataSourceResponsesSource", + "DataSourceResponsesSourceFileContent", + "DataSourceResponsesSourceFileContentContent", + "DataSourceResponsesSourceFileID", + "DataSourceResponsesSourceResponses", + "DataSourceResponsesInputMessages", + "DataSourceResponsesInputMessagesTemplate", + "DataSourceResponsesInputMessagesTemplateTemplate", + "DataSourceResponsesInputMessagesTemplateTemplateChatMessage", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItem", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceResponsesInputMessagesItemReference", + "DataSourceResponsesSamplingParams", + "PerModelUsage", + "PerTestingCriteriaResult", + "ResultCounts", +] + + +class DataSourceResponsesSourceFileContentContent(BaseModel): + item: Dict[str, object] + + sample: Optional[Dict[str, object]] = None + + +class DataSourceResponsesSourceFileContent(BaseModel): + content: List[DataSourceResponsesSourceFileContentContent] + """The content of the jsonl file.""" + + type: Literal["file_content"] + """The type of jsonl source. Always `file_content`.""" + + +class DataSourceResponsesSourceFileID(BaseModel): + id: str + """The identifier of the file.""" + + type: Literal["file_id"] + """The type of jsonl source. Always `file_id`.""" + + +class DataSourceResponsesSourceResponses(BaseModel): + type: Literal["responses"] + """The type of run data source. Always `responses`.""" + + created_after: Optional[int] = None + """Only include items created after this timestamp (inclusive). + + This is a query parameter used to select responses. + """ + + created_before: Optional[int] = None + """Only include items created before this timestamp (inclusive). + + This is a query parameter used to select responses. + """ + + has_tool_calls: Optional[bool] = None + """Whether the response has tool calls. + + This is a query parameter used to select responses. + """ + + instructions_search: Optional[str] = None + """Optional string to search the 'instructions' field. + + This is a query parameter used to select responses. + """ + + metadata: Optional[object] = None + """Metadata filter for the responses. + + This is a query parameter used to select responses. + """ + + model: Optional[str] = None + """The name of the model to find responses for. + + This is a query parameter used to select responses. + """ + + reasoning_effort: Optional[ReasoningEffort] = None + """Optional reasoning effort parameter. + + This is a query parameter used to select responses. + """ + + temperature: Optional[float] = None + """Sampling temperature. 
This is a query parameter used to select responses.""" + + tools: Optional[List[str]] = None + """List of tool names. This is a query parameter used to select responses.""" + + top_p: Optional[float] = None + """Nucleus sampling parameter. This is a query parameter used to select responses.""" + + users: Optional[List[str]] = None + """List of user identifiers. This is a query parameter used to select responses.""" + + +DataSourceResponsesSource: TypeAlias = Annotated[ + Union[DataSourceResponsesSourceFileContent, DataSourceResponsesSourceFileID, DataSourceResponsesSourceResponses], + PropertyInfo(discriminator="type"), +] + + +class DataSourceResponsesInputMessagesTemplateTemplateChatMessage(BaseModel): + content: str + """The content of the message.""" + + role: str + """The role of the message (e.g. "system", "assistant", "user").""" + + +class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): + text: str + """The text output from the model.""" + + type: Literal["output_text"] + """The type of the output text. Always `output_text`.""" + + +DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ + str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText +] + + +class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): + content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent + """Text inputs to the model - can contain template strings.""" + + role: Literal["user", "assistant", "system", "developer"] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. + """ + + type: Optional[Literal["message"]] = None + """The type of the message input. Always `message`.""" + + +DataSourceResponsesInputMessagesTemplateTemplate: TypeAlias = Union[ + DataSourceResponsesInputMessagesTemplateTemplateChatMessage, + DataSourceResponsesInputMessagesTemplateTemplateEvalItem, +] + + +class DataSourceResponsesInputMessagesTemplate(BaseModel): + template: List[DataSourceResponsesInputMessagesTemplateTemplate] + """A list of chat messages forming the prompt or context. + + May include variable references to the "item" namespace, ie {{item.name}}. + """ + + type: Literal["template"] + """The type of input messages. Always `template`.""" + + +class DataSourceResponsesInputMessagesItemReference(BaseModel): + item_reference: str + """A reference to a variable in the "item" namespace. Ie, "item.name" """ + + type: Literal["item_reference"] + """The type of input messages. Always `item_reference`.""" + + +DataSourceResponsesInputMessages: TypeAlias = Annotated[ + Union[DataSourceResponsesInputMessagesTemplate, DataSourceResponsesInputMessagesItemReference], + PropertyInfo(discriminator="type"), +] + + +class DataSourceResponsesSamplingParams(BaseModel): + max_completion_tokens: Optional[int] = None + """The maximum number of tokens in the generated output.""" + + seed: Optional[int] = None + """A seed value to initialize the randomness, during sampling.""" + + temperature: Optional[float] = None + """A higher temperature increases randomness in the outputs.""" + + top_p: Optional[float] = None + """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" + + +class DataSourceResponses(BaseModel): + source: DataSourceResponsesSource + """A EvalResponsesSource object describing a run data source configuration.""" + + type: Literal["responses"] + """The type of run data source. 
Always `responses`.""" + + input_messages: Optional[DataSourceResponsesInputMessages] = None + + model: Optional[str] = None + """The name of the model to use for generating completions (e.g. "o3-mini").""" + + sampling_params: Optional[DataSourceResponsesSamplingParams] = None + DataSource: TypeAlias = Annotated[ - Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, CreateEvalResponsesRunDataSource], + Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceResponses], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/evals/run_list_response.py b/src/openai/types/evals/run_list_response.py index 0df3e5c7ad..80327aa912 100644 --- a/src/openai/types/evals/run_list_response.py +++ b/src/openai/types/evals/run_list_response.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Union, Optional +from typing import Dict, List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from pydantic import Field as FieldInfo @@ -9,14 +9,219 @@ from ..._models import BaseModel from .eval_api_error import EvalAPIError from ..shared.metadata import Metadata +from ..shared.reasoning_effort import ReasoningEffort +from ..responses.response_input_text import ResponseInputText from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource -from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource -__all__ = ["RunListResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"] +__all__ = [ + "RunListResponse", + "DataSource", + "DataSourceResponses", + "DataSourceResponsesSource", + "DataSourceResponsesSourceFileContent", + "DataSourceResponsesSourceFileContentContent", + "DataSourceResponsesSourceFileID", + "DataSourceResponsesSourceResponses", + "DataSourceResponsesInputMessages", + "DataSourceResponsesInputMessagesTemplate", + "DataSourceResponsesInputMessagesTemplateTemplate", + "DataSourceResponsesInputMessagesTemplateTemplateChatMessage", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItem", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceResponsesInputMessagesItemReference", + "DataSourceResponsesSamplingParams", + "PerModelUsage", + "PerTestingCriteriaResult", + "ResultCounts", +] + + +class DataSourceResponsesSourceFileContentContent(BaseModel): + item: Dict[str, object] + + sample: Optional[Dict[str, object]] = None + + +class DataSourceResponsesSourceFileContent(BaseModel): + content: List[DataSourceResponsesSourceFileContentContent] + """The content of the jsonl file.""" + + type: Literal["file_content"] + """The type of jsonl source. Always `file_content`.""" + + +class DataSourceResponsesSourceFileID(BaseModel): + id: str + """The identifier of the file.""" + + type: Literal["file_id"] + """The type of jsonl source. Always `file_id`.""" + + +class DataSourceResponsesSourceResponses(BaseModel): + type: Literal["responses"] + """The type of run data source. Always `responses`.""" + + created_after: Optional[int] = None + """Only include items created after this timestamp (inclusive). + + This is a query parameter used to select responses. + """ + + created_before: Optional[int] = None + """Only include items created before this timestamp (inclusive). 
+ + This is a query parameter used to select responses. + """ + + has_tool_calls: Optional[bool] = None + """Whether the response has tool calls. + + This is a query parameter used to select responses. + """ + + instructions_search: Optional[str] = None + """Optional string to search the 'instructions' field. + + This is a query parameter used to select responses. + """ + + metadata: Optional[object] = None + """Metadata filter for the responses. + + This is a query parameter used to select responses. + """ + + model: Optional[str] = None + """The name of the model to find responses for. + + This is a query parameter used to select responses. + """ + + reasoning_effort: Optional[ReasoningEffort] = None + """Optional reasoning effort parameter. + + This is a query parameter used to select responses. + """ + + temperature: Optional[float] = None + """Sampling temperature. This is a query parameter used to select responses.""" + + tools: Optional[List[str]] = None + """List of tool names. This is a query parameter used to select responses.""" + + top_p: Optional[float] = None + """Nucleus sampling parameter. This is a query parameter used to select responses.""" + + users: Optional[List[str]] = None + """List of user identifiers. This is a query parameter used to select responses.""" + + +DataSourceResponsesSource: TypeAlias = Annotated[ + Union[DataSourceResponsesSourceFileContent, DataSourceResponsesSourceFileID, DataSourceResponsesSourceResponses], + PropertyInfo(discriminator="type"), +] + + +class DataSourceResponsesInputMessagesTemplateTemplateChatMessage(BaseModel): + content: str + """The content of the message.""" + + role: str + """The role of the message (e.g. "system", "assistant", "user").""" + + +class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): + text: str + """The text output from the model.""" + + type: Literal["output_text"] + """The type of the output text. Always `output_text`.""" + + +DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ + str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText +] + + +class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): + content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent + """Text inputs to the model - can contain template strings.""" + + role: Literal["user", "assistant", "system", "developer"] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. + """ + + type: Optional[Literal["message"]] = None + """The type of the message input. Always `message`.""" + + +DataSourceResponsesInputMessagesTemplateTemplate: TypeAlias = Union[ + DataSourceResponsesInputMessagesTemplateTemplateChatMessage, + DataSourceResponsesInputMessagesTemplateTemplateEvalItem, +] + + +class DataSourceResponsesInputMessagesTemplate(BaseModel): + template: List[DataSourceResponsesInputMessagesTemplateTemplate] + """A list of chat messages forming the prompt or context. + + May include variable references to the "item" namespace, ie {{item.name}}. + """ + + type: Literal["template"] + """The type of input messages. Always `template`.""" + + +class DataSourceResponsesInputMessagesItemReference(BaseModel): + item_reference: str + """A reference to a variable in the "item" namespace. Ie, "item.name" """ + + type: Literal["item_reference"] + """The type of input messages. 
Always `item_reference`.""" + + +DataSourceResponsesInputMessages: TypeAlias = Annotated[ + Union[DataSourceResponsesInputMessagesTemplate, DataSourceResponsesInputMessagesItemReference], + PropertyInfo(discriminator="type"), +] + + +class DataSourceResponsesSamplingParams(BaseModel): + max_completion_tokens: Optional[int] = None + """The maximum number of tokens in the generated output.""" + + seed: Optional[int] = None + """A seed value to initialize the randomness, during sampling.""" + + temperature: Optional[float] = None + """A higher temperature increases randomness in the outputs.""" + + top_p: Optional[float] = None + """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" + + +class DataSourceResponses(BaseModel): + source: DataSourceResponsesSource + """A EvalResponsesSource object describing a run data source configuration.""" + + type: Literal["responses"] + """The type of run data source. Always `responses`.""" + + input_messages: Optional[DataSourceResponsesInputMessages] = None + + model: Optional[str] = None + """The name of the model to use for generating completions (e.g. "o3-mini").""" + + sampling_params: Optional[DataSourceResponsesSamplingParams] = None + DataSource: TypeAlias = Annotated[ - Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, CreateEvalResponsesRunDataSource], + Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceResponses], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/evals/run_retrieve_response.py b/src/openai/types/evals/run_retrieve_response.py index 35cdb04efc..9756dcb919 100644 --- a/src/openai/types/evals/run_retrieve_response.py +++ b/src/openai/types/evals/run_retrieve_response.py @@ -1,6 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
-from typing import List, Union, Optional +from typing import Dict, List, Union, Optional from typing_extensions import Literal, Annotated, TypeAlias from pydantic import Field as FieldInfo @@ -9,14 +9,219 @@ from ..._models import BaseModel from .eval_api_error import EvalAPIError from ..shared.metadata import Metadata +from ..shared.reasoning_effort import ReasoningEffort +from ..responses.response_input_text import ResponseInputText from .create_eval_jsonl_run_data_source import CreateEvalJSONLRunDataSource -from .create_eval_responses_run_data_source import CreateEvalResponsesRunDataSource from .create_eval_completions_run_data_source import CreateEvalCompletionsRunDataSource -__all__ = ["RunRetrieveResponse", "DataSource", "PerModelUsage", "PerTestingCriteriaResult", "ResultCounts"] +__all__ = [ + "RunRetrieveResponse", + "DataSource", + "DataSourceResponses", + "DataSourceResponsesSource", + "DataSourceResponsesSourceFileContent", + "DataSourceResponsesSourceFileContentContent", + "DataSourceResponsesSourceFileID", + "DataSourceResponsesSourceResponses", + "DataSourceResponsesInputMessages", + "DataSourceResponsesInputMessagesTemplate", + "DataSourceResponsesInputMessagesTemplateTemplate", + "DataSourceResponsesInputMessagesTemplateTemplateChatMessage", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItem", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent", + "DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText", + "DataSourceResponsesInputMessagesItemReference", + "DataSourceResponsesSamplingParams", + "PerModelUsage", + "PerTestingCriteriaResult", + "ResultCounts", +] + + +class DataSourceResponsesSourceFileContentContent(BaseModel): + item: Dict[str, object] + + sample: Optional[Dict[str, object]] = None + + +class DataSourceResponsesSourceFileContent(BaseModel): + content: List[DataSourceResponsesSourceFileContentContent] + """The content of the jsonl file.""" + + type: Literal["file_content"] + """The type of jsonl source. Always `file_content`.""" + + +class DataSourceResponsesSourceFileID(BaseModel): + id: str + """The identifier of the file.""" + + type: Literal["file_id"] + """The type of jsonl source. Always `file_id`.""" + + +class DataSourceResponsesSourceResponses(BaseModel): + type: Literal["responses"] + """The type of run data source. Always `responses`.""" + + created_after: Optional[int] = None + """Only include items created after this timestamp (inclusive). + + This is a query parameter used to select responses. + """ + + created_before: Optional[int] = None + """Only include items created before this timestamp (inclusive). + + This is a query parameter used to select responses. + """ + + has_tool_calls: Optional[bool] = None + """Whether the response has tool calls. + + This is a query parameter used to select responses. + """ + + instructions_search: Optional[str] = None + """Optional string to search the 'instructions' field. + + This is a query parameter used to select responses. + """ + + metadata: Optional[object] = None + """Metadata filter for the responses. + + This is a query parameter used to select responses. + """ + + model: Optional[str] = None + """The name of the model to find responses for. + + This is a query parameter used to select responses. + """ + + reasoning_effort: Optional[ReasoningEffort] = None + """Optional reasoning effort parameter. + + This is a query parameter used to select responses. + """ + + temperature: Optional[float] = None + """Sampling temperature. 
This is a query parameter used to select responses.""" + + tools: Optional[List[str]] = None + """List of tool names. This is a query parameter used to select responses.""" + + top_p: Optional[float] = None + """Nucleus sampling parameter. This is a query parameter used to select responses.""" + + users: Optional[List[str]] = None + """List of user identifiers. This is a query parameter used to select responses.""" + + +DataSourceResponsesSource: TypeAlias = Annotated[ + Union[DataSourceResponsesSourceFileContent, DataSourceResponsesSourceFileID, DataSourceResponsesSourceResponses], + PropertyInfo(discriminator="type"), +] + + +class DataSourceResponsesInputMessagesTemplateTemplateChatMessage(BaseModel): + content: str + """The content of the message.""" + + role: str + """The role of the message (e.g. "system", "assistant", "user").""" + + +class DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText(BaseModel): + text: str + """The text output from the model.""" + + type: Literal["output_text"] + """The type of the output text. Always `output_text`.""" + + +DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent: TypeAlias = Union[ + str, ResponseInputText, DataSourceResponsesInputMessagesTemplateTemplateEvalItemContentOutputText +] + + +class DataSourceResponsesInputMessagesTemplateTemplateEvalItem(BaseModel): + content: DataSourceResponsesInputMessagesTemplateTemplateEvalItemContent + """Text inputs to the model - can contain template strings.""" + + role: Literal["user", "assistant", "system", "developer"] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. + """ + + type: Optional[Literal["message"]] = None + """The type of the message input. Always `message`.""" + + +DataSourceResponsesInputMessagesTemplateTemplate: TypeAlias = Union[ + DataSourceResponsesInputMessagesTemplateTemplateChatMessage, + DataSourceResponsesInputMessagesTemplateTemplateEvalItem, +] + + +class DataSourceResponsesInputMessagesTemplate(BaseModel): + template: List[DataSourceResponsesInputMessagesTemplateTemplate] + """A list of chat messages forming the prompt or context. + + May include variable references to the "item" namespace, ie {{item.name}}. + """ + + type: Literal["template"] + """The type of input messages. Always `template`.""" + + +class DataSourceResponsesInputMessagesItemReference(BaseModel): + item_reference: str + """A reference to a variable in the "item" namespace. Ie, "item.name" """ + + type: Literal["item_reference"] + """The type of input messages. Always `item_reference`.""" + + +DataSourceResponsesInputMessages: TypeAlias = Annotated[ + Union[DataSourceResponsesInputMessagesTemplate, DataSourceResponsesInputMessagesItemReference], + PropertyInfo(discriminator="type"), +] + + +class DataSourceResponsesSamplingParams(BaseModel): + max_completion_tokens: Optional[int] = None + """The maximum number of tokens in the generated output.""" + + seed: Optional[int] = None + """A seed value to initialize the randomness, during sampling.""" + + temperature: Optional[float] = None + """A higher temperature increases randomness in the outputs.""" + + top_p: Optional[float] = None + """An alternative to temperature for nucleus sampling; 1.0 includes all tokens.""" + + +class DataSourceResponses(BaseModel): + source: DataSourceResponsesSource + """A EvalResponsesSource object describing a run data source configuration.""" + + type: Literal["responses"] + """The type of run data source. 
Always `responses`.""" + + input_messages: Optional[DataSourceResponsesInputMessages] = None + + model: Optional[str] = None + """The name of the model to use for generating completions (e.g. "o3-mini").""" + + sampling_params: Optional[DataSourceResponsesSamplingParams] = None + DataSource: TypeAlias = Annotated[ - Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, CreateEvalResponsesRunDataSource], + Union[CreateEvalJSONLRunDataSource, CreateEvalCompletionsRunDataSource, DataSourceResponses], PropertyInfo(discriminator="type"), ] diff --git a/src/openai/types/graders/label_model_grader.py b/src/openai/types/graders/label_model_grader.py index 16f5b5aa1b..d95ccc6df6 100644 --- a/src/openai/types/graders/label_model_grader.py +++ b/src/openai/types/graders/label_model_grader.py @@ -1,16 +1,41 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List -from typing_extensions import Literal +from typing import List, Union, Optional +from typing_extensions import Literal, TypeAlias from ..._models import BaseModel -from ..shared.eval_item import EvalItem +from ..responses.response_input_text import ResponseInputText -__all__ = ["LabelModelGrader"] +__all__ = ["LabelModelGrader", "Input", "InputContent", "InputContentOutputText"] + + +class InputContentOutputText(BaseModel): + text: str + """The text output from the model.""" + + type: Literal["output_text"] + """The type of the output text. Always `output_text`.""" + + +InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText] + + +class Input(BaseModel): + content: InputContent + """Text inputs to the model - can contain template strings.""" + + role: Literal["user", "assistant", "system", "developer"] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. + """ + + type: Optional[Literal["message"]] = None + """The type of the message input. Always `message`.""" class LabelModelGrader(BaseModel): - input: List[EvalItem] + input: List[Input] labels: List[str] """The labels to assign to each item in the evaluation.""" diff --git a/src/openai/types/graders/label_model_grader_param.py b/src/openai/types/graders/label_model_grader_param.py index 34f5de7726..76d01421ee 100644 --- a/src/openai/types/graders/label_model_grader_param.py +++ b/src/openai/types/graders/label_model_grader_param.py @@ -2,16 +2,41 @@ from __future__ import annotations -from typing import List, Iterable -from typing_extensions import Literal, Required, TypedDict +from typing import List, Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict -from ..shared_params.eval_item import EvalItem +from ..responses.response_input_text_param import ResponseInputTextParam -__all__ = ["LabelModelGraderParam"] +__all__ = ["LabelModelGraderParam", "Input", "InputContent", "InputContentOutputText"] + + +class InputContentOutputText(TypedDict, total=False): + text: Required[str] + """The text output from the model.""" + + type: Required[Literal["output_text"]] + """The type of the output text. Always `output_text`.""" + + +InputContent: TypeAlias = Union[str, ResponseInputTextParam, InputContentOutputText] + + +class Input(TypedDict, total=False): + content: Required[InputContent] + """Text inputs to the model - can contain template strings.""" + + role: Required[Literal["user", "assistant", "system", "developer"]] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. 
+ """ + + type: Literal["message"] + """The type of the message input. Always `message`.""" class LabelModelGraderParam(TypedDict, total=False): - input: Required[Iterable[EvalItem]] + input: Required[Iterable[Input]] labels: Required[List[str]] """The labels to assign to each item in the evaluation.""" diff --git a/src/openai/types/graders/multi_grader.py b/src/openai/types/graders/multi_grader.py index ee9b31d2b0..220de2e61b 100644 --- a/src/openai/types/graders/multi_grader.py +++ b/src/openai/types/graders/multi_grader.py @@ -25,4 +25,4 @@ class MultiGrader(BaseModel): """The name of the grader.""" type: Literal["multi"] - """The type of grader.""" + """The object type, which is always `multi`.""" diff --git a/src/openai/types/graders/multi_grader_param.py b/src/openai/types/graders/multi_grader_param.py index 4dd1a48530..2984b5668f 100644 --- a/src/openai/types/graders/multi_grader_param.py +++ b/src/openai/types/graders/multi_grader_param.py @@ -28,4 +28,4 @@ class MultiGraderParam(TypedDict, total=False): """The name of the grader.""" type: Required[Literal["multi"]] - """The type of grader.""" + """The object type, which is always `multi`.""" diff --git a/src/openai/types/graders/score_model_grader.py b/src/openai/types/graders/score_model_grader.py index 6d81019c26..1349f75a58 100644 --- a/src/openai/types/graders/score_model_grader.py +++ b/src/openai/types/graders/score_model_grader.py @@ -1,16 +1,41 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. -from typing import List, Optional -from typing_extensions import Literal +from typing import List, Union, Optional +from typing_extensions import Literal, TypeAlias from ..._models import BaseModel -from ..shared.eval_item import EvalItem +from ..responses.response_input_text import ResponseInputText -__all__ = ["ScoreModelGrader"] +__all__ = ["ScoreModelGrader", "Input", "InputContent", "InputContentOutputText"] + + +class InputContentOutputText(BaseModel): + text: str + """The text output from the model.""" + + type: Literal["output_text"] + """The type of the output text. Always `output_text`.""" + + +InputContent: TypeAlias = Union[str, ResponseInputText, InputContentOutputText] + + +class Input(BaseModel): + content: InputContent + """Text inputs to the model - can contain template strings.""" + + role: Literal["user", "assistant", "system", "developer"] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. + """ + + type: Optional[Literal["message"]] = None + """The type of the message input. Always `message`.""" class ScoreModelGrader(BaseModel): - input: List[EvalItem] + input: List[Input] """The input text. 
This may include template strings.""" model: str diff --git a/src/openai/types/graders/score_model_grader_param.py b/src/openai/types/graders/score_model_grader_param.py index 3e0b9d08eb..673f14e47d 100644 --- a/src/openai/types/graders/score_model_grader_param.py +++ b/src/openai/types/graders/score_model_grader_param.py @@ -2,16 +2,41 @@ from __future__ import annotations -from typing import Iterable -from typing_extensions import Literal, Required, TypedDict +from typing import Union, Iterable +from typing_extensions import Literal, Required, TypeAlias, TypedDict -from ..shared_params.eval_item import EvalItem +from ..responses.response_input_text_param import ResponseInputTextParam -__all__ = ["ScoreModelGraderParam"] +__all__ = ["ScoreModelGraderParam", "Input", "InputContent", "InputContentOutputText"] + + +class InputContentOutputText(TypedDict, total=False): + text: Required[str] + """The text output from the model.""" + + type: Required[Literal["output_text"]] + """The type of the output text. Always `output_text`.""" + + +InputContent: TypeAlias = Union[str, ResponseInputTextParam, InputContentOutputText] + + +class Input(TypedDict, total=False): + content: Required[InputContent] + """Text inputs to the model - can contain template strings.""" + + role: Required[Literal["user", "assistant", "system", "developer"]] + """The role of the message input. + + One of `user`, `assistant`, `system`, or `developer`. + """ + + type: Literal["message"] + """The type of the message input. Always `message`.""" class ScoreModelGraderParam(TypedDict, total=False): - input: Required[Iterable[EvalItem]] + input: Required[Iterable[Input]] """The input text. This may include template strings.""" model: Required[str] diff --git a/src/openai/types/shared/__init__.py b/src/openai/types/shared/__init__.py index 10450d8c70..6ad0ed5e01 100644 --- a/src/openai/types/shared/__init__.py +++ b/src/openai/types/shared/__init__.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from .metadata import Metadata as Metadata -from .eval_item import EvalItem as EvalItem from .reasoning import Reasoning as Reasoning from .all_models import AllModels as AllModels from .chat_model import ChatModel as ChatModel diff --git a/src/openai/types/shared/chat_model.py b/src/openai/types/shared/chat_model.py index 4869cd325c..75069e7a98 100644 --- a/src/openai/types/shared/chat_model.py +++ b/src/openai/types/shared/chat_model.py @@ -37,6 +37,7 @@ "gpt-4o-search-preview-2025-03-11", "gpt-4o-mini-search-preview-2025-03-11", "chatgpt-4o-latest", + "codex-mini-latest", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", diff --git a/src/openai/types/shared/eval_item.py b/src/openai/types/shared/eval_item.py deleted file mode 100644 index f235d1ef17..0000000000 --- a/src/openai/types/shared/eval_item.py +++ /dev/null @@ -1,34 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing import Union, Optional -from typing_extensions import Literal, TypeAlias - -from ..._models import BaseModel -from ..responses.response_input_text import ResponseInputText - -__all__ = ["EvalItem", "Content", "ContentOutputText"] - - -class ContentOutputText(BaseModel): - text: str - """The text output from the model.""" - - type: Literal["output_text"] - """The type of the output text. 
Always `output_text`.""" - - -Content: TypeAlias = Union[str, ResponseInputText, ContentOutputText] - - -class EvalItem(BaseModel): - content: Content - """Text inputs to the model - can contain template strings.""" - - role: Literal["user", "assistant", "system", "developer"] - """The role of the message input. - - One of `user`, `assistant`, `system`, or `developer`. - """ - - type: Optional[Literal["message"]] = None - """The type of the message input. Always `message`.""" diff --git a/src/openai/types/shared_params/__init__.py b/src/openai/types/shared_params/__init__.py index 68d16b90dc..8894710807 100644 --- a/src/openai/types/shared_params/__init__.py +++ b/src/openai/types/shared_params/__init__.py @@ -1,7 +1,6 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. from .metadata import Metadata as Metadata -from .eval_item import EvalItem as EvalItem from .reasoning import Reasoning as Reasoning from .chat_model import ChatModel as ChatModel from .compound_filter import CompoundFilter as CompoundFilter diff --git a/src/openai/types/shared_params/chat_model.py b/src/openai/types/shared_params/chat_model.py index 99e082fc11..c421744b8a 100644 --- a/src/openai/types/shared_params/chat_model.py +++ b/src/openai/types/shared_params/chat_model.py @@ -39,6 +39,7 @@ "gpt-4o-search-preview-2025-03-11", "gpt-4o-mini-search-preview-2025-03-11", "chatgpt-4o-latest", + "codex-mini-latest", "gpt-4o-mini", "gpt-4o-mini-2024-07-18", "gpt-4-turbo", diff --git a/src/openai/types/shared_params/eval_item.py b/src/openai/types/shared_params/eval_item.py deleted file mode 100644 index 7740ccc165..0000000000 --- a/src/openai/types/shared_params/eval_item.py +++ /dev/null @@ -1,35 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing import Union -from typing_extensions import Literal, Required, TypeAlias, TypedDict - -from ..responses.response_input_text_param import ResponseInputTextParam - -__all__ = ["EvalItem", "Content", "ContentOutputText"] - - -class ContentOutputText(TypedDict, total=False): - text: Required[str] - """The text output from the model.""" - - type: Required[Literal["output_text"]] - """The type of the output text. Always `output_text`.""" - - -Content: TypeAlias = Union[str, ResponseInputTextParam, ContentOutputText] - - -class EvalItem(TypedDict, total=False): - content: Required[Content] - """Text inputs to the model - can contain template strings.""" - - role: Required[Literal["user", "assistant", "system", "developer"]] - """The role of the message input. - - One of `user`, `assistant`, `system`, or `developer`. - """ - - type: Literal["message"] - """The type of the message input. Always `message`.""" diff --git a/src/openai/types/vector_store.py b/src/openai/types/vector_store.py index 2af120350e..2473a442d2 100644 --- a/src/openai/types/vector_store.py +++ b/src/openai/types/vector_store.py @@ -5,9 +5,8 @@ from .._models import BaseModel from .shared.metadata import Metadata -from .vector_store_expiration_after import VectorStoreExpirationAfter -__all__ = ["VectorStore", "FileCounts"] +__all__ = ["VectorStore", "FileCounts", "ExpiresAfter"] class FileCounts(BaseModel): @@ -27,6 +26,17 @@ class FileCounts(BaseModel): """The total number of files.""" +class ExpiresAfter(BaseModel): + anchor: Literal["last_active_at"] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `last_active_at`. 
+ """ + + days: int + """The number of days after the anchor time that the vector store will expire.""" + + class VectorStore(BaseModel): id: str """The identifier, which can be referenced in API endpoints.""" @@ -65,7 +75,7 @@ class VectorStore(BaseModel): usage_bytes: int """The total number of bytes used by the files in the vector store.""" - expires_after: Optional[VectorStoreExpirationAfter] = None + expires_after: Optional[ExpiresAfter] = None """The expiration policy for a vector store.""" expires_at: Optional[int] = None diff --git a/src/openai/types/vector_store_create_params.py b/src/openai/types/vector_store_create_params.py index dbcedac188..365d0936b1 100644 --- a/src/openai/types/vector_store_create_params.py +++ b/src/openai/types/vector_store_create_params.py @@ -3,13 +3,12 @@ from __future__ import annotations from typing import List, Optional -from typing_extensions import TypedDict +from typing_extensions import Literal, Required, TypedDict from .shared_params.metadata import Metadata from .file_chunking_strategy_param import FileChunkingStrategyParam -from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam -__all__ = ["VectorStoreCreateParams"] +__all__ = ["VectorStoreCreateParams", "ExpiresAfter"] class VectorStoreCreateParams(TypedDict, total=False): @@ -20,7 +19,7 @@ class VectorStoreCreateParams(TypedDict, total=False): non-empty. """ - expires_after: VectorStoreExpirationAfterParam + expires_after: ExpiresAfter """The expiration policy for a vector store.""" file_ids: List[str] @@ -42,3 +41,14 @@ class VectorStoreCreateParams(TypedDict, total=False): name: str """The name of the vector store.""" + + +class ExpiresAfter(TypedDict, total=False): + anchor: Required[Literal["last_active_at"]] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `last_active_at`. + """ + + days: Required[int] + """The number of days after the anchor time that the vector store will expire.""" diff --git a/src/openai/types/vector_store_expiration_after.py b/src/openai/types/vector_store_expiration_after.py deleted file mode 100644 index 1d417d526b..0000000000 --- a/src/openai/types/vector_store_expiration_after.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from typing_extensions import Literal - -from .._models import BaseModel - -__all__ = ["VectorStoreExpirationAfter"] - - -class VectorStoreExpirationAfter(BaseModel): - anchor: Literal["last_active_at"] - """Anchor timestamp after which the expiration policy applies. - - Supported anchors: `last_active_at`. - """ - - days: int - """The number of days after the anchor time that the vector store will expire.""" diff --git a/src/openai/types/vector_store_expiration_after_param.py b/src/openai/types/vector_store_expiration_after_param.py deleted file mode 100644 index 29a008c713..0000000000 --- a/src/openai/types/vector_store_expiration_after_param.py +++ /dev/null @@ -1,18 +0,0 @@ -# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. - -from __future__ import annotations - -from typing_extensions import Literal, Required, TypedDict - -__all__ = ["VectorStoreExpirationAfterParam"] - - -class VectorStoreExpirationAfterParam(TypedDict, total=False): - anchor: Required[Literal["last_active_at"]] - """Anchor timestamp after which the expiration policy applies. - - Supported anchors: `last_active_at`. 
- """ - - days: Required[int] - """The number of days after the anchor time that the vector store will expire.""" diff --git a/src/openai/types/vector_store_update_params.py b/src/openai/types/vector_store_update_params.py index 7c90784dfd..4f6ac63963 100644 --- a/src/openai/types/vector_store_update_params.py +++ b/src/openai/types/vector_store_update_params.py @@ -3,16 +3,15 @@ from __future__ import annotations from typing import Optional -from typing_extensions import TypedDict +from typing_extensions import Literal, Required, TypedDict from .shared_params.metadata import Metadata -from .vector_store_expiration_after_param import VectorStoreExpirationAfterParam -__all__ = ["VectorStoreUpdateParams"] +__all__ = ["VectorStoreUpdateParams", "ExpiresAfter"] class VectorStoreUpdateParams(TypedDict, total=False): - expires_after: Optional[VectorStoreExpirationAfterParam] + expires_after: Optional[ExpiresAfter] """The expiration policy for a vector store.""" metadata: Optional[Metadata] @@ -27,3 +26,14 @@ class VectorStoreUpdateParams(TypedDict, total=False): name: Optional[str] """The name of the vector store.""" + + +class ExpiresAfter(TypedDict, total=False): + anchor: Required[Literal["last_active_at"]] + """Anchor timestamp after which the expiration policy applies. + + Supported anchors: `last_active_at`. + """ + + days: Required[int] + """The number of days after the anchor time that the vector store will expire.""" From e9a89ab7b6387610e433550207a23973b7edda3a Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 13:44:14 -0400 Subject: [PATCH 6/8] fix: fix create audio transcription endpoint --- src/openai/resources/audio/transcriptions.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/openai/resources/audio/transcriptions.py b/src/openai/resources/audio/transcriptions.py index 9d4f7e9255..bca8210a83 100644 --- a/src/openai/resources/audio/transcriptions.py +++ b/src/openai/resources/audio/transcriptions.py @@ -78,6 +78,7 @@ def create( *, file: FileTypes, model: Union[str, AudioModel], + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, response_format: Literal["verbose_json"], language: str | NotGiven = NOT_GIVEN, @@ -98,6 +99,7 @@ def create( *, file: FileTypes, model: Union[str, AudioModel], + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, response_format: Literal["text", "srt", "vtt"], include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, language: str | NotGiven = NOT_GIVEN, @@ -374,11 +376,11 @@ async def create( model: Union[str, AudioModel], chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, - response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN, language: str | NotGiven = NOT_GIVEN, prompt: str | NotGiven = NOT_GIVEN, + response_format: Union[Literal["json"], NotGiven] = NOT_GIVEN, + stream: Optional[Literal[False]] | NotGiven = NOT_GIVEN, temperature: float | NotGiven = NOT_GIVEN, - include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, timestamp_granularities: List[Literal["word", "segment"]] | NotGiven = NOT_GIVEN, # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs. 
# The extra values given here take precedence over values defined on the client or passed to this method. @@ -455,6 +457,7 @@ async def create( *, file: FileTypes, model: Union[str, AudioModel], + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, response_format: Literal["verbose_json"], language: str | NotGiven = NOT_GIVEN, @@ -475,6 +478,7 @@ async def create( *, file: FileTypes, model: Union[str, AudioModel], + chunking_strategy: Optional[transcription_create_params.ChunkingStrategy] | NotGiven = NOT_GIVEN, include: List[TranscriptionInclude] | NotGiven = NOT_GIVEN, response_format: Literal["text", "srt", "vtt"], language: str | NotGiven = NOT_GIVEN, From 32c99a6f5885d4bf3511a7f06b70000edd274301 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 19:41:25 +0000 Subject: [PATCH 7/8] feat(api): further updates for evals API --- .stats.yml | 4 ++-- src/openai/resources/evals/evals.py | 22 ++++++++++++++----- src/openai/resources/evals/runs/runs.py | 14 +++++++----- .../transcription_session_updated_event.py | 2 +- src/openai/types/eval_create_params.py | 18 ++++++++++----- ...l_stored_completions_data_source_config.py | 4 ++-- ...create_eval_completions_run_data_source.py | 12 +++++++--- ..._eval_completions_run_data_source_param.py | 12 +++++++--- .../create_eval_jsonl_run_data_source.py | 1 + ...create_eval_jsonl_run_data_source_param.py | 1 + src/openai/types/evals/run_cancel_response.py | 18 +++++++-------- src/openai/types/evals/run_create_params.py | 18 +++++++-------- src/openai/types/evals/run_create_response.py | 18 +++++++-------- src/openai/types/evals/run_list_response.py | 18 +++++++-------- .../types/evals/run_retrieve_response.py | 18 +++++++-------- 15 files changed, 107 insertions(+), 73 deletions(-) diff --git a/.stats.yml b/.stats.yml index a3c5d081d4..afa33d93bd 100644 --- a/.stats.yml +++ b/.stats.yml @@ -1,4 +1,4 @@ configured_endpoints: 101 -openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-5fa16b9a02985ae06e41be14946a9c325dc672fb014b3c19abca65880c6990e6.yml -openapi_spec_hash: da3e669f65130043b1170048c0727890 +openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/openai%2Fopenai-262e171d0a8150ea1192474d16ba3afdf9a054b399f1a49a9c9b697a3073c136.yml +openapi_spec_hash: 33e00a48df8f94c94f46290c489f132b config_hash: d8d5fda350f6db77c784f35429741a2e diff --git a/src/openai/resources/evals/evals.py b/src/openai/resources/evals/evals.py index c12562a86d..7aba192c51 100644 --- a/src/openai/resources/evals/evals.py +++ b/src/openai/resources/evals/evals.py @@ -74,15 +74,20 @@ def create( ) -> EvalCreateResponse: """ Create the structure of an evaluation that can be used to test a model's - performance. An evaluation is a set of testing criteria and a datasource. After + performance. An evaluation is a set of testing criteria and the config for a + data source, which dictates the schema of the data used in the evaluation. After creating an evaluation, you can run it on different models and model parameters. We support several types of graders and datasources. For more information, see the [Evals guide](https://platform.openai.com/docs/guides/evals). Args: - data_source_config: The configuration for the data source used for the evaluation runs. 
+ data_source_config: The configuration for the data source used for the evaluation runs. Dictates the + schema of the data used in the evaluation. - testing_criteria: A list of graders for all eval runs in this group. + testing_criteria: A list of graders for all eval runs in this group. Graders can reference + variables in the data source using double curly braces notation, like + `{{item.variable_name}}`. To reference the model's output, use the `sample` + namespace (ie, `{{sample.output_text}}`). metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and @@ -333,15 +338,20 @@ async def create( ) -> EvalCreateResponse: """ Create the structure of an evaluation that can be used to test a model's - performance. An evaluation is a set of testing criteria and a datasource. After + performance. An evaluation is a set of testing criteria and the config for a + data source, which dictates the schema of the data used in the evaluation. After creating an evaluation, you can run it on different models and model parameters. We support several types of graders and datasources. For more information, see the [Evals guide](https://platform.openai.com/docs/guides/evals). Args: - data_source_config: The configuration for the data source used for the evaluation runs. + data_source_config: The configuration for the data source used for the evaluation runs. Dictates the + schema of the data used in the evaluation. - testing_criteria: A list of graders for all eval runs in this group. + testing_criteria: A list of graders for all eval runs in this group. Graders can reference + variables in the data source using double curly braces notation, like + `{{item.variable_name}}`. To reference the model's output, use the `sample` + namespace (ie, `{{sample.output_text}}`). metadata: Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format, and diff --git a/src/openai/resources/evals/runs/runs.py b/src/openai/resources/evals/runs/runs.py index d74c91e3c4..7efc61292c 100644 --- a/src/openai/resources/evals/runs/runs.py +++ b/src/openai/resources/evals/runs/runs.py @@ -72,9 +72,10 @@ def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> RunCreateResponse: - """Create a new evaluation run. - - This is the endpoint that will kick off grading. + """ + Kicks off a new run for a given evaluation, specifying the data source, and what + model configuration to use to test. The datasource will be validated against the + schema specified in the config of the evaluation. Args: data_source: Details about the run's data source. @@ -321,9 +322,10 @@ async def create( extra_body: Body | None = None, timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN, ) -> RunCreateResponse: - """Create a new evaluation run. - - This is the endpoint that will kick off grading. + """ + Kicks off a new run for a given evaluation, specifying the data source, and what + model configuration to use to test. The datasource will be validated against the + schema specified in the config of the evaluation. Args: data_source: Details about the run's data source. 
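For orientation, a minimal sketch of what kicking off a run against the new `responses` data source could look like with this SDK. The eval id, model names, and filter values below are placeholder assumptions; the field names follow the run-create params added in this patch:

    from openai import OpenAI

    client = OpenAI()

    run = client.evals.runs.create(
        eval_id="eval_abc123",  # hypothetical eval identifier
        name="responses-run",
        data_source={
            "type": "responses",
            # Filters that select previously stored responses to grade.
            "source": {
                "type": "responses",
                "model": "gpt-4o-mini",
                "has_tool_calls": False,
            },
            # Template messages may reference the "item" namespace.
            "input_messages": {
                "type": "template",
                "template": [
                    {"role": "system", "content": "You are a helpful assistant."},
                    {"role": "user", "content": "Summarize: {{item.input}}"},
                ],
            },
            "model": "o3-mini",
            "sampling_params": {"temperature": 0.2, "max_completion_tokens": 256},
        },
    )
    print(run.id)
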
diff --git a/src/openai/types/beta/realtime/transcription_session_updated_event.py b/src/openai/types/beta/realtime/transcription_session_updated_event.py index ffc100bcc2..1f1fbdae14 100644 --- a/src/openai/types/beta/realtime/transcription_session_updated_event.py +++ b/src/openai/types/beta/realtime/transcription_session_updated_event.py @@ -16,7 +16,7 @@ class TranscriptionSessionUpdatedEvent(BaseModel): """A new Realtime transcription session configuration. When a session is created on the server via REST API, the session object also - contains an ephemeral key. Default TTL for keys is one minute. This property is + contains an ephemeral key. Default TTL for keys is 10 minutes. This property is not present when a session is updated via the WebSocket API. """ diff --git a/src/openai/types/eval_create_params.py b/src/openai/types/eval_create_params.py index 8d508a2d8e..20a3765481 100644 --- a/src/openai/types/eval_create_params.py +++ b/src/openai/types/eval_create_params.py @@ -33,10 +33,18 @@ class EvalCreateParams(TypedDict, total=False): data_source_config: Required[DataSourceConfig] - """The configuration for the data source used for the evaluation runs.""" + """The configuration for the data source used for the evaluation runs. + + Dictates the schema of the data used in the evaluation. + """ testing_criteria: Required[Iterable[TestingCriterion]] - """A list of graders for all eval runs in this group.""" + """A list of graders for all eval runs in this group. + + Graders can reference variables in the data source using double curly braces + notation, like `{{item.variable_name}}`. To reference the model's output, use + the `sample` namespace (ie, `{{sample.output_text}}`). + """ metadata: Optional[Metadata] """Set of 16 key-value pairs that can be attached to an object. @@ -75,8 +83,8 @@ class DataSourceConfigLogs(TypedDict, total=False): class DataSourceConfigStoredCompletions(TypedDict, total=False): - type: Required[Literal["stored-completions"]] - """The type of data source. Always `stored-completions`.""" + type: Required[Literal["stored_completions"]] + """The type of data source. Always `stored_completions`.""" metadata: Dict[str, object] """Metadata filters for the stored completions data source.""" @@ -129,7 +137,7 @@ class TestingCriterionLabelModel(TypedDict, total=False): input: Required[Iterable[TestingCriterionLabelModelInput]] """A list of chat messages forming the prompt or context. - May include variable references to the "item" namespace, ie {{item.name}}. + May include variable references to the `item` namespace, ie {{item.name}}. """ labels: Required[List[str]] diff --git a/src/openai/types/eval_stored_completions_data_source_config.py b/src/openai/types/eval_stored_completions_data_source_config.py index 5016f0ae9c..98f86a4719 100644 --- a/src/openai/types/eval_stored_completions_data_source_config.py +++ b/src/openai/types/eval_stored_completions_data_source_config.py @@ -18,8 +18,8 @@ class EvalStoredCompletionsDataSourceConfig(BaseModel): [here](https://json-schema.org/). """ - type: Literal["stored-completions"] - """The type of data source. Always `stored-completions`.""" + type: Literal["stored_completions"] + """The type of data source. Always `stored_completions`.""" metadata: Optional[Metadata] = None """Set of 16 key-value pairs that can be attached to an object. 
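The hunks above rename the stored completions literal from `stored-completions` to `stored_completions` and document that graders can reference data source variables via `{{item...}}` and the model's output via `{{sample...}}`. A hedged `evals.create` example exercising both; the eval name, metadata filter value, and `string_check` grader fields are illustrative assumptions rather than values taken from this patch:

```python
# Sketch only: the grader configuration and metadata filter are assumed; the
# `stored_completions` literal matches the fix in the hunk above.
from openai import OpenAI

client = OpenAI()

evaluation = client.evals.create(
    name="support-replies",
    data_source_config={
        "type": "stored_completions",        # underscore form after this patch
        "metadata": {"usecase": "support"},  # filters the stored completions
    },
    testing_criteria=[
        {
            "type": "string_check",
            "name": "mentions-refund",
            # The `sample` namespace refers to the model's output, per the
            # docstrings added in this patch.
            "input": "{{sample.output_text}}",
            "reference": "refund",
            "operation": "ilike",
        }
    ],
)
print(evaluation.id)
```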
diff --git a/src/openai/types/evals/create_eval_completions_run_data_source.py b/src/openai/types/evals/create_eval_completions_run_data_source.py index 29c687b542..064ef3a310 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source.py @@ -117,7 +117,7 @@ class InputMessagesTemplate(BaseModel): template: List[InputMessagesTemplateTemplate] """A list of chat messages forming the prompt or context. - May include variable references to the "item" namespace, ie {{item.name}}. + May include variable references to the `item` namespace, ie {{item.name}}. """ type: Literal["template"] @@ -126,7 +126,7 @@ class InputMessagesTemplate(BaseModel): class InputMessagesItemReference(BaseModel): item_reference: str - """A reference to a variable in the "item" namespace. Ie, "item.name" """ + """A reference to a variable in the `item` namespace. Ie, "item.input_trajectory" """ type: Literal["item_reference"] """The type of input messages. Always `item_reference`.""" @@ -153,12 +153,18 @@ class SamplingParams(BaseModel): class CreateEvalCompletionsRunDataSource(BaseModel): source: Source - """A StoredCompletionsRunDataSource configuration describing a set of filters""" + """Determines what populates the `item` namespace in this run's data source.""" type: Literal["completions"] """The type of run data source. Always `completions`.""" input_messages: Optional[InputMessages] = None + """Used when sampling from a model. + + Dictates the structure of the messages passed into the model. Can either be a + reference to a prebuilt trajectory (ie, `item.input_trajectory`), or a template + with variable references to the `item` namespace. + """ model: Optional[str] = None """The name of the model to use for generating completions (e.g. "o3-mini").""" diff --git a/src/openai/types/evals/create_eval_completions_run_data_source_param.py b/src/openai/types/evals/create_eval_completions_run_data_source_param.py index c53064ee27..3fa4c19ad2 100644 --- a/src/openai/types/evals/create_eval_completions_run_data_source_param.py +++ b/src/openai/types/evals/create_eval_completions_run_data_source_param.py @@ -113,7 +113,7 @@ class InputMessagesTemplate(TypedDict, total=False): template: Required[Iterable[InputMessagesTemplateTemplate]] """A list of chat messages forming the prompt or context. - May include variable references to the "item" namespace, ie {{item.name}}. + May include variable references to the `item` namespace, ie {{item.name}}. """ type: Required[Literal["template"]] @@ -122,7 +122,7 @@ class InputMessagesTemplate(TypedDict, total=False): class InputMessagesItemReference(TypedDict, total=False): item_reference: Required[str] - """A reference to a variable in the "item" namespace. Ie, "item.name" """ + """A reference to a variable in the `item` namespace. Ie, "item.input_trajectory" """ type: Required[Literal["item_reference"]] """The type of input messages. Always `item_reference`.""" @@ -147,12 +147,18 @@ class SamplingParams(TypedDict, total=False): class CreateEvalCompletionsRunDataSourceParam(TypedDict, total=False): source: Required[Source] - """A StoredCompletionsRunDataSource configuration describing a set of filters""" + """Determines what populates the `item` namespace in this run's data source.""" type: Required[Literal["completions"]] """The type of run data source. Always `completions`.""" input_messages: InputMessages + """Used when sampling from a model. 
+ + Dictates the structure of the messages passed into the model. Can either be a + reference to a prebuilt trajectory (ie, `item.input_trajectory`), or a template + with variable references to the `item` namespace. + """ model: str """The name of the model to use for generating completions (e.g. "o3-mini").""" diff --git a/src/openai/types/evals/create_eval_jsonl_run_data_source.py b/src/openai/types/evals/create_eval_jsonl_run_data_source.py index d2be56243b..ae36f8c55f 100644 --- a/src/openai/types/evals/create_eval_jsonl_run_data_source.py +++ b/src/openai/types/evals/create_eval_jsonl_run_data_source.py @@ -36,6 +36,7 @@ class SourceFileID(BaseModel): class CreateEvalJSONLRunDataSource(BaseModel): source: Source + """Determines what populates the `item` namespace in the data source.""" type: Literal["jsonl"] """The type of data source. Always `jsonl`.""" diff --git a/src/openai/types/evals/create_eval_jsonl_run_data_source_param.py b/src/openai/types/evals/create_eval_jsonl_run_data_source_param.py index b8ba48a666..217ee36346 100644 --- a/src/openai/types/evals/create_eval_jsonl_run_data_source_param.py +++ b/src/openai/types/evals/create_eval_jsonl_run_data_source_param.py @@ -41,6 +41,7 @@ class SourceFileID(TypedDict, total=False): class CreateEvalJSONLRunDataSourceParam(TypedDict, total=False): source: Required[Source] + """Determines what populates the `item` namespace in the data source.""" type: Required[Literal["jsonl"]] """The type of data source. Always `jsonl`.""" diff --git a/src/openai/types/evals/run_cancel_response.py b/src/openai/types/evals/run_cancel_response.py index 318e7abc35..d3416129af 100644 --- a/src/openai/types/evals/run_cancel_response.py +++ b/src/openai/types/evals/run_cancel_response.py @@ -76,12 +76,6 @@ class DataSourceResponsesSourceResponses(BaseModel): This is a query parameter used to select responses. """ - has_tool_calls: Optional[bool] = None - """Whether the response has tool calls. - - This is a query parameter used to select responses. - """ - instructions_search: Optional[str] = None """Optional string to search the 'instructions' field. @@ -170,7 +164,7 @@ class DataSourceResponsesInputMessagesTemplate(BaseModel): template: List[DataSourceResponsesInputMessagesTemplateTemplate] """A list of chat messages forming the prompt or context. - May include variable references to the "item" namespace, ie {{item.name}}. + May include variable references to the `item` namespace, ie {{item.name}}. """ type: Literal["template"] @@ -179,7 +173,7 @@ class DataSourceResponsesInputMessagesTemplate(BaseModel): class DataSourceResponsesInputMessagesItemReference(BaseModel): item_reference: str - """A reference to a variable in the "item" namespace. Ie, "item.name" """ + """A reference to a variable in the `item` namespace. Ie, "item.name" """ type: Literal["item_reference"] """The type of input messages. Always `item_reference`.""" @@ -207,12 +201,18 @@ class DataSourceResponsesSamplingParams(BaseModel): class DataSourceResponses(BaseModel): source: DataSourceResponsesSource - """A EvalResponsesSource object describing a run data source configuration.""" + """Determines what populates the `item` namespace in this run's data source.""" type: Literal["responses"] """The type of run data source. Always `responses`.""" input_messages: Optional[DataSourceResponsesInputMessages] = None + """Used when sampling from a model. + + Dictates the structure of the messages passed into the model. 
Can either be a + reference to a prebuilt trajectory (ie, `item.input_trajectory`), or a template + with variable references to the `item` namespace. + """ model: Optional[str] = None """The name of the model to use for generating completions (e.g. "o3-mini").""" diff --git a/src/openai/types/evals/run_create_params.py b/src/openai/types/evals/run_create_params.py index e030224dcb..5aa2398f36 100644 --- a/src/openai/types/evals/run_create_params.py +++ b/src/openai/types/evals/run_create_params.py @@ -88,12 +88,6 @@ class DataSourceCreateEvalResponsesRunDataSourceSourceResponses(TypedDict, total This is a query parameter used to select responses. """ - has_tool_calls: Optional[bool] - """Whether the response has tool calls. - - This is a query parameter used to select responses. - """ - instructions_search: Optional[str] """Optional string to search the 'instructions' field. @@ -187,7 +181,7 @@ class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate(TypedDict, template: Required[Iterable[DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplate]] """A list of chat messages forming the prompt or context. - May include variable references to the "item" namespace, ie {{item.name}}. + May include variable references to the `item` namespace, ie {{item.name}}. """ type: Required[Literal["template"]] @@ -196,7 +190,7 @@ class DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplate(TypedDict, class DataSourceCreateEvalResponsesRunDataSourceInputMessagesItemReference(TypedDict, total=False): item_reference: Required[str] - """A reference to a variable in the "item" namespace. Ie, "item.name" """ + """A reference to a variable in the `item` namespace. Ie, "item.name" """ type: Required[Literal["item_reference"]] """The type of input messages. Always `item_reference`.""" @@ -224,12 +218,18 @@ class DataSourceCreateEvalResponsesRunDataSourceSamplingParams(TypedDict, total= class DataSourceCreateEvalResponsesRunDataSource(TypedDict, total=False): source: Required[DataSourceCreateEvalResponsesRunDataSourceSource] - """A EvalResponsesSource object describing a run data source configuration.""" + """Determines what populates the `item` namespace in this run's data source.""" type: Required[Literal["responses"]] """The type of run data source. Always `responses`.""" input_messages: DataSourceCreateEvalResponsesRunDataSourceInputMessages + """Used when sampling from a model. + + Dictates the structure of the messages passed into the model. Can either be a + reference to a prebuilt trajectory (ie, `item.input_trajectory`), or a template + with variable references to the `item` namespace. + """ model: str """The name of the model to use for generating completions (e.g. "o3-mini").""" diff --git a/src/openai/types/evals/run_create_response.py b/src/openai/types/evals/run_create_response.py index 902e45c9bc..51aed2080f 100644 --- a/src/openai/types/evals/run_create_response.py +++ b/src/openai/types/evals/run_create_response.py @@ -76,12 +76,6 @@ class DataSourceResponsesSourceResponses(BaseModel): This is a query parameter used to select responses. """ - has_tool_calls: Optional[bool] = None - """Whether the response has tool calls. - - This is a query parameter used to select responses. - """ - instructions_search: Optional[str] = None """Optional string to search the 'instructions' field. 
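For the `responses` run data source being reshaped in these hunks (the `has_tool_calls` filter is dropped and `input_messages` gains documentation), a plausible payload looks like the following sketch. The filter fields shown are only those visible in the surrounding hunks, not a complete list, and the overall shape is an assumption:

```python
# Assumed shape of a `responses` run data source: `source` filters stored
# Responses API data (populating the `item` namespace), while `input_messages`
# replays each item's prebuilt trajectory via an item reference.
data_source = {
    "type": "responses",
    "source": {
        "type": "responses",
        "instructions_search": "summarize",  # field visible in the hunk above
    },
    "input_messages": {
        "type": "item_reference",
        "item_reference": "item.input_trajectory",
    },
}
```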
@@ -170,7 +164,7 @@ class DataSourceResponsesInputMessagesTemplate(BaseModel): template: List[DataSourceResponsesInputMessagesTemplateTemplate] """A list of chat messages forming the prompt or context. - May include variable references to the "item" namespace, ie {{item.name}}. + May include variable references to the `item` namespace, ie {{item.name}}. """ type: Literal["template"] @@ -179,7 +173,7 @@ class DataSourceResponsesInputMessagesTemplate(BaseModel): class DataSourceResponsesInputMessagesItemReference(BaseModel): item_reference: str - """A reference to a variable in the "item" namespace. Ie, "item.name" """ + """A reference to a variable in the `item` namespace. Ie, "item.name" """ type: Literal["item_reference"] """The type of input messages. Always `item_reference`.""" @@ -207,12 +201,18 @@ class DataSourceResponsesSamplingParams(BaseModel): class DataSourceResponses(BaseModel): source: DataSourceResponsesSource - """A EvalResponsesSource object describing a run data source configuration.""" + """Determines what populates the `item` namespace in this run's data source.""" type: Literal["responses"] """The type of run data source. Always `responses`.""" input_messages: Optional[DataSourceResponsesInputMessages] = None + """Used when sampling from a model. + + Dictates the structure of the messages passed into the model. Can either be a + reference to a prebuilt trajectory (ie, `item.input_trajectory`), or a template + with variable references to the `item` namespace. + """ model: Optional[str] = None """The name of the model to use for generating completions (e.g. "o3-mini").""" diff --git a/src/openai/types/evals/run_list_response.py b/src/openai/types/evals/run_list_response.py index 80327aa912..f1d0b01da9 100644 --- a/src/openai/types/evals/run_list_response.py +++ b/src/openai/types/evals/run_list_response.py @@ -76,12 +76,6 @@ class DataSourceResponsesSourceResponses(BaseModel): This is a query parameter used to select responses. """ - has_tool_calls: Optional[bool] = None - """Whether the response has tool calls. - - This is a query parameter used to select responses. - """ - instructions_search: Optional[str] = None """Optional string to search the 'instructions' field. @@ -170,7 +164,7 @@ class DataSourceResponsesInputMessagesTemplate(BaseModel): template: List[DataSourceResponsesInputMessagesTemplateTemplate] """A list of chat messages forming the prompt or context. - May include variable references to the "item" namespace, ie {{item.name}}. + May include variable references to the `item` namespace, ie {{item.name}}. """ type: Literal["template"] @@ -179,7 +173,7 @@ class DataSourceResponsesInputMessagesTemplate(BaseModel): class DataSourceResponsesInputMessagesItemReference(BaseModel): item_reference: str - """A reference to a variable in the "item" namespace. Ie, "item.name" """ + """A reference to a variable in the `item` namespace. Ie, "item.name" """ type: Literal["item_reference"] """The type of input messages. Always `item_reference`.""" @@ -207,12 +201,18 @@ class DataSourceResponsesSamplingParams(BaseModel): class DataSourceResponses(BaseModel): source: DataSourceResponsesSource - """A EvalResponsesSource object describing a run data source configuration.""" + """Determines what populates the `item` namespace in this run's data source.""" type: Literal["responses"] """The type of run data source. Always `responses`.""" input_messages: Optional[DataSourceResponsesInputMessages] = None + """Used when sampling from a model. 
+ + Dictates the structure of the messages passed into the model. Can either be a + reference to a prebuilt trajectory (ie, `item.input_trajectory`), or a template + with variable references to the `item` namespace. + """ model: Optional[str] = None """The name of the model to use for generating completions (e.g. "o3-mini").""" diff --git a/src/openai/types/evals/run_retrieve_response.py b/src/openai/types/evals/run_retrieve_response.py index 9756dcb919..6c5951b4eb 100644 --- a/src/openai/types/evals/run_retrieve_response.py +++ b/src/openai/types/evals/run_retrieve_response.py @@ -76,12 +76,6 @@ class DataSourceResponsesSourceResponses(BaseModel): This is a query parameter used to select responses. """ - has_tool_calls: Optional[bool] = None - """Whether the response has tool calls. - - This is a query parameter used to select responses. - """ - instructions_search: Optional[str] = None """Optional string to search the 'instructions' field. @@ -170,7 +164,7 @@ class DataSourceResponsesInputMessagesTemplate(BaseModel): template: List[DataSourceResponsesInputMessagesTemplateTemplate] """A list of chat messages forming the prompt or context. - May include variable references to the "item" namespace, ie {{item.name}}. + May include variable references to the `item` namespace, ie {{item.name}}. """ type: Literal["template"] @@ -179,7 +173,7 @@ class DataSourceResponsesInputMessagesTemplate(BaseModel): class DataSourceResponsesInputMessagesItemReference(BaseModel): item_reference: str - """A reference to a variable in the "item" namespace. Ie, "item.name" """ + """A reference to a variable in the `item` namespace. Ie, "item.name" """ type: Literal["item_reference"] """The type of input messages. Always `item_reference`.""" @@ -207,12 +201,18 @@ class DataSourceResponsesSamplingParams(BaseModel): class DataSourceResponses(BaseModel): source: DataSourceResponsesSource - """A EvalResponsesSource object describing a run data source configuration.""" + """Determines what populates the `item` namespace in this run's data source.""" type: Literal["responses"] """The type of run data source. Always `responses`.""" input_messages: Optional[DataSourceResponsesInputMessages] = None + """Used when sampling from a model. + + Dictates the structure of the messages passed into the model. Can either be a + reference to a prebuilt trajectory (ie, `item.input_trajectory`), or a template + with variable references to the `item` namespace. + """ model: Optional[str] = None """The name of the model to use for generating completions (e.g. 
"o3-mini").""" From d3accfb7f885037130fbd284d41f18915de07509 Mon Sep 17 00:00:00 2001 From: "stainless-app[bot]" <142633134+stainless-app[bot]@users.noreply.github.com> Date: Fri, 16 May 2025 19:41:54 +0000 Subject: [PATCH 8/8] release: 1.79.0 --- .release-please-manifest.json | 2 +- CHANGELOG.md | 22 ++++++++++++++++++++++ pyproject.toml | 2 +- src/openai/_version.py | 2 +- 4 files changed, 25 insertions(+), 3 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index f15af035f8..36925cfe97 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.78.1" + ".": "1.79.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index b153f3ef05..9ec3e61533 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,27 @@ # Changelog +## 1.79.0 (2025-05-16) + +Full Changelog: [v1.78.1...v1.79.0](https://github.com/openai/openai-python/compare/v1.78.1...v1.79.0) + +### Features + +* **api:** further updates for evals API ([32c99a6](https://github.com/openai/openai-python/commit/32c99a6f5885d4bf3511a7f06b70000edd274301)) +* **api:** manual updates ([25245e5](https://github.com/openai/openai-python/commit/25245e5e3d0713abfb65b760aee1f12bc61deb41)) +* **api:** responses x eval api ([fd586cb](https://github.com/openai/openai-python/commit/fd586cbdf889c9a5c6b9be177ff02fbfffa3eba5)) +* **api:** Updating Assistants and Evals API schemas ([98ba7d3](https://github.com/openai/openai-python/commit/98ba7d355551213a13803f68d5642eecbb4ffd39)) + + +### Bug Fixes + +* fix create audio transcription endpoint ([e9a89ab](https://github.com/openai/openai-python/commit/e9a89ab7b6387610e433550207a23973b7edda3a)) + + +### Chores + +* **ci:** fix installation instructions ([f26c5fc](https://github.com/openai/openai-python/commit/f26c5fc85d98d700b68cb55c8be5d15983a9aeaf)) +* **ci:** upload sdks to package manager ([861f105](https://github.com/openai/openai-python/commit/861f1055768168ab04987a42efcd32a07bc93542)) + ## 1.78.1 (2025-05-12) Full Changelog: [v1.78.0...v1.78.1](https://github.com/openai/openai-python/compare/v1.78.0...v1.78.1) diff --git a/pyproject.toml b/pyproject.toml index 71c86c38ea..5affe3c483 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "openai" -version = "1.78.1" +version = "1.79.0" description = "The official Python library for the openai API" dynamic = ["readme"] license = "Apache-2.0" diff --git a/src/openai/_version.py b/src/openai/_version.py index 9b430dfa8b..77c73cdfd9 100644 --- a/src/openai/_version.py +++ b/src/openai/_version.py @@ -1,4 +1,4 @@ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. __title__ = "openai" -__version__ = "1.78.1" # x-release-please-version +__version__ = "1.79.0" # x-release-please-version