From 866979ba2b9c196b997ad08110e721f11db286e8 Mon Sep 17 00:00:00 2001 From: mukunda katta Date: Sat, 18 Apr 2026 01:32:45 -0700 Subject: [PATCH 1/4] Handle null response output_text items --- src/openai/lib/_parsing/_responses.py | 2 +- src/openai/types/responses/response.py | 2 +- .../types/responses/response_output_text.py | 2 +- tests/lib/responses/test_responses.py | 122 ++++++++++++++++++ 4 files changed, 125 insertions(+), 3 deletions(-) diff --git a/src/openai/lib/_parsing/_responses.py b/src/openai/lib/_parsing/_responses.py index 8853a0749f..259b636802 100644 --- a/src/openai/lib/_parsing/_responses.py +++ b/src/openai/lib/_parsing/_responses.py @@ -71,7 +71,7 @@ def parse_response( type_=ParsedResponseOutputText[TextFormatT], value={ **item.to_dict(), - "parsed": parse_text(item.text, text_format=text_format), + "parsed": parse_text(item.text, text_format=text_format) if item.text is not None else None, }, ) ) diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index ada0783bce..e32a017e85 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -315,7 +315,7 @@ def output_text(self) -> str: for output in self.output: if output.type == "message": for content in output.content: - if content.type == "output_text": + if content.type == "output_text" and content.text is not None: texts.append(content.text) return "".join(texts) diff --git a/src/openai/types/responses/response_output_text.py b/src/openai/types/responses/response_output_text.py index 2386fcb3c0..4c10877109 100644 --- a/src/openai/types/responses/response_output_text.py +++ b/src/openai/types/responses/response_output_text.py @@ -122,7 +122,7 @@ class ResponseOutputText(BaseModel): annotations: List[Annotation] """The annotations of the text output.""" - text: str + text: Optional[str] = None """The text output from the model.""" type: Literal["output_text"] diff --git a/tests/lib/responses/test_responses.py b/tests/lib/responses/test_responses.py index 8e5f16df95..b692f720df 100644 --- a/tests/lib/responses/test_responses.py +++ b/tests/lib/responses/test_responses.py @@ -3,11 +3,14 @@ from typing_extensions import TypeVar import pytest +from pydantic import BaseModel from respx import MockRouter from inline_snapshot import snapshot from openai import OpenAI, AsyncOpenAI +from openai.lib._parsing._responses import parse_response from openai._utils import assert_signatures_in_sync +from openai.types.responses import Response from ...conftest import base_url from ..snapshots import make_snapshot_request @@ -41,6 +44,125 @@ def test_output_text(client: OpenAI, respx_mock: MockRouter) -> None: ) +def test_output_text_ignores_null_items() -> None: + response = Response.model_validate( + { + "id": "resp_null_output_text", + "object": "response", + "created_at": 0, + "status": "completed", + "background": False, + "error": None, + "incomplete_details": None, + "instructions": None, + "max_output_tokens": None, + "max_tool_calls": None, + "model": "gpt-4o-mini", + "output": [ + { + "id": "msg_null_output_text", + "type": "message", + "status": "completed", + "role": "assistant", + "content": [ + {"type": "output_text", "annotations": [], "logprobs": [], "text": None}, + {"type": "output_text", "annotations": [], "logprobs": [], "text": '{"message":"hello"}'}, + ], + } + ], + "parallel_tool_calls": True, + "previous_response_id": None, + "prompt_cache_key": None, + "reasoning": {"effort": None, "summary": None}, + "safety_identifier": None, + 
"service_tier": "default", + "store": True, + "temperature": 1.0, + "text": {"format": {"type": "text"}, "verbosity": "medium"}, + "tool_choice": "auto", + "tools": [], + "top_logprobs": 0, + "top_p": 1.0, + "truncation": "disabled", + "usage": { + "input_tokens": 1, + "input_tokens_details": {"cached_tokens": 0}, + "output_tokens": 1, + "output_tokens_details": {"reasoning_tokens": 0}, + "total_tokens": 2, + }, + "user": None, + "metadata": {}, + } + ) + + assert response.output_text == '{"message":"hello"}' + + +def test_parse_response_skips_null_output_text() -> None: + class ParsedMessage(BaseModel): + message: str + + response = Response.model_validate( + { + "id": "resp_null_output_text", + "object": "response", + "created_at": 0, + "status": "completed", + "background": False, + "error": None, + "incomplete_details": None, + "instructions": None, + "max_output_tokens": None, + "max_tool_calls": None, + "model": "gpt-4o-mini", + "output": [ + { + "id": "msg_null_output_text", + "type": "message", + "status": "completed", + "role": "assistant", + "content": [ + {"type": "output_text", "annotations": [], "logprobs": [], "text": None}, + {"type": "output_text", "annotations": [], "logprobs": [], "text": '{"message":"hello"}'}, + ], + } + ], + "parallel_tool_calls": True, + "previous_response_id": None, + "prompt_cache_key": None, + "reasoning": {"effort": None, "summary": None}, + "safety_identifier": None, + "service_tier": "default", + "store": True, + "temperature": 1.0, + "text": {"format": {"type": "text"}, "verbosity": "medium"}, + "tool_choice": "auto", + "tools": [], + "top_logprobs": 0, + "top_p": 1.0, + "truncation": "disabled", + "usage": { + "input_tokens": 1, + "input_tokens_details": {"cached_tokens": 0}, + "output_tokens": 1, + "output_tokens_details": {"reasoning_tokens": 0}, + "total_tokens": 2, + }, + "user": None, + "metadata": {}, + } + ) + + parsed = parse_response(text_format=ParsedMessage, input_tools=[], response=response) + message_output = parsed.output[0] + assert message_output.type == "message" + assert message_output.content[0].type == "output_text" + assert message_output.content[0].parsed is None + assert message_output.content[1].type == "output_text" + assert message_output.content[1].parsed == ParsedMessage(message="hello") + + @pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"]) def test_stream_method_definition_in_sync(sync: bool, client: OpenAI, async_client: AsyncOpenAI) -> None: checking_client: OpenAI | AsyncOpenAI = client if sync else async_client From b39b7cdb4b49c124d3af1f04e6601f7b783dd711 Mon Sep 17 00:00:00 2001 From: mukunda katta Date: Tue, 21 Apr 2026 08:09:05 -0700 Subject: [PATCH 2/4] docs(helpers): fix vector_stores polling examples --- helpers.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/helpers.md b/helpers.md index 89ff4498cf..b6dd1d4e8a 100644 --- a/helpers.md +++ b/helpers.md @@ -510,9 +510,9 @@ The polling methods are: client.beta.threads.create_and_run_poll(...) client.beta.threads.runs.create_and_poll(...) client.beta.threads.runs.submit_tool_outputs_and_poll(...) -client.beta.vector_stores.files.upload_and_poll(...) -client.beta.vector_stores.files.create_and_poll(...) -client.beta.vector_stores.file_batches.create_and_poll(...) -client.beta.vector_stores.file_batches.upload_and_poll(...) +client.vector_stores.files.upload_and_poll(...) +client.vector_stores.files.create_and_poll(...) +client.vector_stores.file_batches.create_and_poll(...) 
+client.vector_stores.file_batches.upload_and_poll(...) client.videos.create_and_poll(...) ``` From bbdee6d0a5a5700d762a82fd113238a6edbaf596 Mon Sep 17 00:00:00 2001 From: mukunda katta Date: Tue, 21 Apr 2026 08:11:13 -0700 Subject: [PATCH 3/4] fix(types): use in_memory prompt cache retention literal --- .../resources/chat/completions/completions.py | 24 +++++++------- src/openai/resources/responses/responses.py | 32 +++++++++---------- .../types/chat/completion_create_params.py | 2 +- src/openai/types/responses/response.py | 2 +- .../types/responses/response_create_params.py | 2 +- .../types/responses/responses_client_event.py | 2 +- .../responses/responses_client_event_param.py | 2 +- tests/api_resources/chat/test_completions.py | 8 ++--- tests/api_resources/test_responses.py | 8 ++--- 9 files changed, 41 insertions(+), 41 deletions(-) diff --git a/src/openai/resources/chat/completions/completions.py b/src/openai/resources/chat/completions/completions.py index 845bd1a1e1..8b4fc12ae9 100644 --- a/src/openai/resources/chat/completions/completions.py +++ b/src/openai/resources/chat/completions/completions.py @@ -109,7 +109,7 @@ def parse( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, safety_identifier: str | Omit = omit, seed: Optional[int] | Omit = omit, @@ -264,7 +264,7 @@ def create( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, response_format: completion_create_params.ResponseFormat | Omit = omit, safety_identifier: str | Omit = omit, @@ -571,7 +571,7 @@ def create( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, response_format: completion_create_params.ResponseFormat | Omit = omit, safety_identifier: str | Omit = omit, @@ -877,7 +877,7 @@ def create( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, response_format: completion_create_params.ResponseFormat | Omit = omit, safety_identifier: str | Omit = omit, @@ -1182,7 +1182,7 @@ def create( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning_effort: 
Optional[ReasoningEffort] | Omit = omit, response_format: completion_create_params.ResponseFormat | Omit = omit, safety_identifier: str | Omit = omit, @@ -1461,7 +1461,7 @@ def stream( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, safety_identifier: str | Omit = omit, seed: Optional[int] | Omit = omit, @@ -1612,7 +1612,7 @@ async def parse( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, safety_identifier: str | Omit = omit, seed: Optional[int] | Omit = omit, @@ -1767,7 +1767,7 @@ async def create( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, response_format: completion_create_params.ResponseFormat | Omit = omit, safety_identifier: str | Omit = omit, @@ -2074,7 +2074,7 @@ async def create( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, response_format: completion_create_params.ResponseFormat | Omit = omit, safety_identifier: str | Omit = omit, @@ -2380,7 +2380,7 @@ async def create( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, response_format: completion_create_params.ResponseFormat | Omit = omit, safety_identifier: str | Omit = omit, @@ -2685,7 +2685,7 @@ async def create( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, response_format: completion_create_params.ResponseFormat | Omit = omit, safety_identifier: str | Omit = omit, @@ -2964,7 +2964,7 @@ def stream( prediction: Optional[ChatCompletionPredictionContentParam] | Omit = omit, presence_penalty: Optional[float] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: 
Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning_effort: Optional[ReasoningEffort] | Omit = omit, safety_identifier: str | Omit = omit, seed: Optional[int] | Omit = omit, diff --git a/src/openai/resources/responses/responses.py b/src/openai/resources/responses/responses.py index ab5f7688a5..cb9a09bf22 100644 --- a/src/openai/resources/responses/responses.py +++ b/src/openai/resources/responses/responses.py @@ -146,7 +146,7 @@ def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -396,7 +396,7 @@ def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -645,7 +645,7 @@ def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -892,7 +892,7 @@ def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -995,7 +995,7 @@ def stream( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -1036,7 +1036,7 @@ def stream( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -1187,7 +1187,7 @@ def parse( previous_response_id: Optional[str] 
| Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -1823,7 +1823,7 @@ async def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -2073,7 +2073,7 @@ async def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -2322,7 +2322,7 @@ async def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -2569,7 +2569,7 @@ async def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -2672,7 +2672,7 @@ def stream( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -2713,7 +2713,7 @@ def stream( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -2868,7 
+2868,7 @@ async def parse( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -4543,7 +4543,7 @@ def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, @@ -4623,7 +4623,7 @@ async def create( previous_response_id: Optional[str] | Omit = omit, prompt: Optional[ResponsePromptParam] | Omit = omit, prompt_cache_key: str | Omit = omit, - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] | Omit = omit, + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] | Omit = omit, reasoning: Optional[Reasoning] | Omit = omit, safety_identifier: str | Omit = omit, service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] | Omit = omit, diff --git a/src/openai/types/chat/completion_create_params.py b/src/openai/types/chat/completion_create_params.py index 8e71ccbe41..0379ee0865 100644 --- a/src/openai/types/chat/completion_create_params.py +++ b/src/openai/types/chat/completion_create_params.py @@ -185,7 +185,7 @@ class CompletionCreateParamsBase(TypedDict, total=False): [Learn more](https://platform.openai.com/docs/guides/prompt-caching). """ - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] """The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching, which keeps cached prefixes diff --git a/src/openai/types/responses/response.py b/src/openai/types/responses/response.py index e32a017e85..7dc193c061 100644 --- a/src/openai/types/responses/response.py +++ b/src/openai/types/responses/response.py @@ -214,7 +214,7 @@ class Response(BaseModel): [Learn more](https://platform.openai.com/docs/guides/prompt-caching). """ - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] = None + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] = None """The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching, which keeps cached prefixes diff --git a/src/openai/types/responses/response_create_params.py b/src/openai/types/responses/response_create_params.py index bf7170da1f..a04495f40a 100644 --- a/src/openai/types/responses/response_create_params.py +++ b/src/openai/types/responses/response_create_params.py @@ -152,7 +152,7 @@ class ResponseCreateParamsBase(TypedDict, total=False): [Learn more](https://platform.openai.com/docs/guides/prompt-caching). """ - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] """The retention policy for the prompt cache. 
Set to `24h` to enable extended prompt caching, which keeps cached prefixes diff --git a/src/openai/types/responses/responses_client_event.py b/src/openai/types/responses/responses_client_event.py index 2bc6f899c5..5f9e73c61f 100644 --- a/src/openai/types/responses/responses_client_event.py +++ b/src/openai/types/responses/responses_client_event.py @@ -184,7 +184,7 @@ class ResponsesClientEvent(BaseModel): [Learn more](https://platform.openai.com/docs/guides/prompt-caching). """ - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] = None + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] = None """The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching, which keeps cached prefixes diff --git a/src/openai/types/responses/responses_client_event_param.py b/src/openai/types/responses/responses_client_event_param.py index 08596ef9ea..249c812116 100644 --- a/src/openai/types/responses/responses_client_event_param.py +++ b/src/openai/types/responses/responses_client_event_param.py @@ -185,7 +185,7 @@ class ResponsesClientEventParam(TypedDict, total=False): [Learn more](https://platform.openai.com/docs/guides/prompt-caching). """ - prompt_cache_retention: Optional[Literal["in-memory", "24h"]] + prompt_cache_retention: Optional[Literal["in_memory", "24h"]] """The retention policy for the prompt cache. Set to `24h` to enable extended prompt caching, which keeps cached prefixes diff --git a/tests/api_resources/chat/test_completions.py b/tests/api_resources/chat/test_completions.py index a75764b5e9..ea3066a505 100644 --- a/tests/api_resources/chat/test_completions.py +++ b/tests/api_resources/chat/test_completions.py @@ -73,7 +73,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: }, presence_penalty=-2, prompt_cache_key="prompt-cache-key-1234", - prompt_cache_retention="in-memory", + prompt_cache_retention="in_memory", reasoning_effort="none", response_format={"type": "text"}, safety_identifier="safety-identifier-1234", @@ -207,7 +207,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: }, presence_penalty=-2, prompt_cache_key="prompt-cache-key-1234", - prompt_cache_retention="in-memory", + prompt_cache_retention="in_memory", reasoning_effort="none", response_format={"type": "text"}, safety_identifier="safety-identifier-1234", @@ -516,7 +516,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn }, presence_penalty=-2, prompt_cache_key="prompt-cache-key-1234", - prompt_cache_retention="in-memory", + prompt_cache_retention="in_memory", reasoning_effort="none", response_format={"type": "text"}, safety_identifier="safety-identifier-1234", @@ -650,7 +650,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn }, presence_penalty=-2, prompt_cache_key="prompt-cache-key-1234", - prompt_cache_retention="in-memory", + prompt_cache_retention="in_memory", reasoning_effort="none", response_format={"type": "text"}, safety_identifier="safety-identifier-1234", diff --git a/tests/api_resources/test_responses.py b/tests/api_resources/test_responses.py index 0b871d525d..096b983be2 100644 --- a/tests/api_resources/test_responses.py +++ b/tests/api_resources/test_responses.py @@ -52,7 +52,7 @@ def test_method_create_with_all_params_overload_1(self, client: OpenAI) -> None: "version": "version", }, prompt_cache_key="prompt-cache-key-1234", - prompt_cache_retention="in-memory", + prompt_cache_retention="in_memory", reasoning={ "effort": 
"none", "generate_summary": "auto", @@ -140,7 +140,7 @@ def test_method_create_with_all_params_overload_2(self, client: OpenAI) -> None: "version": "version", }, prompt_cache_key="prompt-cache-key-1234", - prompt_cache_retention="in-memory", + prompt_cache_retention="in_memory", reasoning={ "effort": "none", "generate_summary": "auto", @@ -463,7 +463,7 @@ async def test_method_create_with_all_params_overload_1(self, async_client: Asyn "version": "version", }, prompt_cache_key="prompt-cache-key-1234", - prompt_cache_retention="in-memory", + prompt_cache_retention="in_memory", reasoning={ "effort": "none", "generate_summary": "auto", @@ -551,7 +551,7 @@ async def test_method_create_with_all_params_overload_2(self, async_client: Asyn "version": "version", }, prompt_cache_key="prompt-cache-key-1234", - prompt_cache_retention="in-memory", + prompt_cache_retention="in_memory", reasoning={ "effort": "none", "generate_summary": "auto", From f50aec91c12e044494358ef7af0c5b78ae3ccae3 Mon Sep 17 00:00:00 2001 From: Mukunda Rao Katta Date: Wed, 22 Apr 2026 15:41:47 -0700 Subject: [PATCH 4/4] fix(streaming): normalize output_text text=None before delta append Per @chatgpt-codex-connector review: making ResponseOutputText.text Optional allows response.content_part.added snapshots to carry text=None. Streaming accumulation then hits 'content.text += event.delta' with a NoneType and raises TypeError, terminating parsing. Normalize text to '' before the first delta append so the existing streaming invariant ('text is a string') holds regardless of the initial content_part.added shape. --- src/openai/lib/streaming/responses/_responses.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/openai/lib/streaming/responses/_responses.py b/src/openai/lib/streaming/responses/_responses.py index 6975a9260d..2a8422754e 100644 --- a/src/openai/lib/streaming/responses/_responses.py +++ b/src/openai/lib/streaming/responses/_responses.py @@ -351,6 +351,11 @@ def accumulate_event(self, event: RawResponseStreamEvent) -> ParsedResponseSnaps if output.type == "message": content = output.content[event.content_index] assert content.type == "output_text" + # content_part.added snapshots may carry text=None when the + # server streams a not-yet-materialized part. Normalize to "" + # so the first delta doesn't raise TypeError on '+='. + if content.text is None: + content.text = "" content.text += event.delta elif event.type == "response.function_call_arguments.delta": output = snapshot.output[event.output_index]