diff --git a/docs/agents.md b/docs/agents.md index 4dd9f9ea0e..c37deddfe7 100644 --- a/docs/agents.md +++ b/docs/agents.md @@ -321,6 +321,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), @@ -385,6 +386,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), @@ -1049,6 +1051,7 @@ with capture_run_messages() as messages: # (2)! timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -1073,6 +1076,7 @@ with capture_run_messages() as messages: # (2)! timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( diff --git a/docs/api/models/function.md b/docs/api/models/function.md index 4cdceb449f..bfbe35c469 100644 --- a/docs/api/models/function.md +++ b/docs/api/models/function.md @@ -30,6 +30,7 @@ async def model_function( timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ] diff --git a/docs/deferred-tools.md b/docs/deferred-tools.md index 31e14149c0..fc5dc2eaa4 100644 --- a/docs/deferred-tools.md +++ b/docs/deferred-tools.md @@ -118,6 +118,7 @@ print(result.all_messages()) timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -152,6 +153,7 @@ print(result.all_messages()) timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelRequest( @@ -173,6 +175,7 @@ print(result.all_messages()) timestamp=datetime.datetime(...), ), ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -197,6 +200,7 @@ print(result.all_messages()) timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -324,6 +328,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -350,6 +355,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( diff --git a/docs/message-history.md b/docs/message-history.md index 3363312fed..7045c87c9a 100644 --- a/docs/message-history.md +++ b/docs/message-history.md @@ -51,6 +51,7 @@ print(result.all_messages()) timestamp=datetime.datetime(...), ), ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -95,6 +96,7 @@ async def main(): timestamp=datetime.datetime(...), ), ], + timestamp=datetime.datetime(...), run_id='...', ) ] @@ -122,6 +124,7 @@ async def main(): timestamp=datetime.datetime(...), ), ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -178,6 +181,7 @@ print(result2.all_messages()) timestamp=datetime.datetime(...), ), ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -198,6 +202,7 @@ print(result2.all_messages()) timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -303,6 +308,7 @@ print(result2.all_messages()) timestamp=datetime.datetime(...), ), ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -323,6 +329,7 @@ print(result2.all_messages()) timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( diff --git a/docs/testing.md b/docs/testing.md index 3089585ab0..7178039af0 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -128,6 +128,7 @@ async def test_forecast(): timestamp=IsNow(tz=timezone.utc), # 
(7)! ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -158,6 +159,7 @@ async def test_forecast(): timestamp=IsNow(tz=timezone.utc), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( diff --git a/docs/tools.md b/docs/tools.md index 40dcf5c810..2b8d9247b8 100644 --- a/docs/tools.md +++ b/docs/tools.md @@ -88,6 +88,7 @@ print(dice_result.all_messages()) timestamp=datetime.datetime(...), ), ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -110,6 +111,7 @@ print(dice_result.all_messages()) timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( @@ -132,6 +134,7 @@ print(dice_result.all_messages()) timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ), ModelResponse( diff --git a/pydantic_ai_slim/pydantic_ai/_a2a.py b/pydantic_ai_slim/pydantic_ai/_a2a.py index eee3832b1b..77a07e935f 100644 --- a/pydantic_ai_slim/pydantic_ai/_a2a.py +++ b/pydantic_ai_slim/pydantic_ai/_a2a.py @@ -25,6 +25,7 @@ ToolCallPart, UserPromptPart, VideoUrl, + _utils, ) from .agent import AbstractAgent, AgentDepsT, OutputDataT @@ -200,7 +201,9 @@ def build_message_history(self, history: list[Message]) -> list[ModelMessage]: model_messages: list[ModelMessage] = [] for message in history: if message['role'] == 'user': - model_messages.append(ModelRequest(parts=self._request_parts_from_a2a(message['parts']))) + model_messages.append( + ModelRequest(parts=self._request_parts_from_a2a(message['parts']), timestamp=_utils.now_utc()) + ) else: model_messages.append(ModelResponse(parts=self._response_parts_from_a2a(message['parts']))) return model_messages diff --git a/pydantic_ai_slim/pydantic_ai/_agent_graph.py b/pydantic_ai_slim/pydantic_ai/_agent_graph.py index 043c27c4f5..3d996c56cf 100644 --- a/pydantic_ai_slim/pydantic_ai/_agent_graph.py +++ b/pydantic_ai_slim/pydantic_ai/_agent_graph.py @@ -19,7 +19,7 @@ from pydantic_ai._function_schema import _takes_ctx as is_takes_ctx # type: ignore from pydantic_ai._instrumentation import DEFAULT_INSTRUMENTATION_VERSION from pydantic_ai._tool_manager import ToolManager -from pydantic_ai._utils import dataclasses_no_defaults_repr, get_union_args, is_async_callable, run_in_executor +from pydantic_ai._utils import dataclasses_no_defaults_repr, get_union_args, is_async_callable, now_utc, run_in_executor from pydantic_ai.builtin_tools import AbstractBuiltinTool from pydantic_graph import BaseNode, GraphRunContext from pydantic_graph.beta import Graph, GraphBuilder @@ -492,6 +492,7 @@ async def _make_request( async def _prepare_request( self, ctx: GraphRunContext[GraphAgentState, GraphAgentDeps[DepsT, NodeRunEndT]] ) -> tuple[ModelSettings | None, models.ModelRequestParameters, list[_messages.ModelMessage], RunContext[DepsT]]: + self.request.timestamp = now_utc() self.request.run_id = self.request.run_id or ctx.state.run_id ctx.state.message_history.append(self.request) @@ -509,6 +510,11 @@ async def _prepare_request( # Update the new message index to ensure `result.new_messages()` returns the correct messages ctx.deps.new_message_index -= len(original_history) - len(message_history) + # Ensure the last request has a timestamp (history processors may create new ModelRequest objects without one) + last_request = message_history[-1] + if isinstance(last_request, _messages.ModelRequest) and last_request.timestamp is None: + last_request.timestamp = self.request.timestamp + # Merge possible consecutive trailing `ModelRequest`s 
into one, with tool call parts before user parts, # but don't store it in the message history on state. This is just for the benefit of model classes that want clear user/assistant boundaries. # See `tests/test_tools.py::test_parallel_tool_return_with_deferred` for an example where this is necessary @@ -785,7 +791,7 @@ def _handle_final_result( # For backwards compatibility, append a new ModelRequest using the tool returns and retries if tool_responses: - messages.append(_messages.ModelRequest(parts=tool_responses, run_id=ctx.state.run_id)) + messages.append(_messages.ModelRequest(parts=tool_responses, run_id=ctx.state.run_id, timestamp=now_utc())) return End(final_result) @@ -1340,6 +1346,7 @@ def _clean_message_history(messages: list[_messages.ModelMessage]) -> list[_mess merged_message = _messages.ModelRequest( parts=parts, instructions=last_message.instructions or message.instructions, + timestamp=message.timestamp or last_message.timestamp, ) clean_messages[-1] = merged_message else: diff --git a/pydantic_ai_slim/pydantic_ai/_mcp.py b/pydantic_ai_slim/pydantic_ai/_mcp.py index 1729e4c225..2e2bd69b72 100644 --- a/pydantic_ai_slim/pydantic_ai/_mcp.py +++ b/pydantic_ai_slim/pydantic_ai/_mcp.py @@ -4,7 +4,7 @@ from collections.abc import Sequence from typing import Literal -from . import exceptions, messages +from . import _utils, exceptions, messages try: from mcp import types as mcp_types @@ -44,7 +44,7 @@ def map_from_mcp_params(params: mcp_types.CreateMessageRequestParams) -> list[me # role is assistant # if there are any request parts, add a request message wrapping them if request_parts: - pai_messages.append(messages.ModelRequest(parts=request_parts)) + pai_messages.append(messages.ModelRequest(parts=request_parts, timestamp=_utils.now_utc())) request_parts = [] response_parts.append(map_from_sampling_content(content)) @@ -52,7 +52,7 @@ def map_from_mcp_params(params: mcp_types.CreateMessageRequestParams) -> list[me if response_parts: pai_messages.append(messages.ModelResponse(parts=response_parts)) if request_parts: - pai_messages.append(messages.ModelRequest(parts=request_parts)) + pai_messages.append(messages.ModelRequest(parts=request_parts, timestamp=_utils.now_utc())) return pai_messages diff --git a/pydantic_ai_slim/pydantic_ai/agent/__init__.py b/pydantic_ai_slim/pydantic_ai/agent/__init__.py index 19edb4a619..6d8c08f742 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/__init__.py +++ b/pydantic_ai_slim/pydantic_ai/agent/__init__.py @@ -509,6 +509,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), diff --git a/pydantic_ai_slim/pydantic_ai/agent/abstract.py b/pydantic_ai_slim/pydantic_ai/agent/abstract.py index 96d4d23766..708bfd9d81 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/abstract.py +++ b/pydantic_ai_slim/pydantic_ai/agent/abstract.py @@ -560,7 +560,11 @@ async def on_complete() -> None: # For backwards compatibility, append a new ModelRequest using the tool returns and retries if parts: - messages.append(_messages.ModelRequest(parts, run_id=graph_ctx.state.run_id)) + messages.append( + _messages.ModelRequest( + parts, run_id=graph_ctx.state.run_id, timestamp=_utils.now_utc() + ) + ) await agent_run.next(_agent_graph.SetFinalResult(final_result)) @@ -1005,6 +1009,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), diff --git a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py index 
6e275b3f50..15ae149425 100644 --- a/pydantic_ai_slim/pydantic_ai/agent/wrapper.py +++ b/pydantic_ai_slim/pydantic_ai/agent/wrapper.py @@ -165,6 +165,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py index c5adf5221d..79465fe8b0 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/dbos/_agent.py @@ -824,6 +824,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py index 60c8122686..ba38aa9304 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/prefect/_agent.py @@ -769,6 +769,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), diff --git a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py index 42fc2a872e..0016d76084 100644 --- a/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py +++ b/pydantic_ai_slim/pydantic_ai/durable_exec/temporal/_agent.py @@ -843,6 +843,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), diff --git a/pydantic_ai_slim/pydantic_ai/messages.py b/pydantic_ai_slim/pydantic_ai/messages.py index a855036de3..168c9f7feb 100644 --- a/pydantic_ai_slim/pydantic_ai/messages.py +++ b/pydantic_ai_slim/pydantic_ai/messages.py @@ -997,6 +997,9 @@ class ModelRequest: _: KW_ONLY + timestamp: datetime | None = None + """The timestamp when the request was sent to the model.""" + instructions: str | None = None """The instructions for the model.""" @@ -1012,7 +1015,7 @@ @classmethod def user_text_prompt(cls, user_prompt: str, *, instructions: str | None = None) -> ModelRequest: """Create a `ModelRequest` with a single user prompt as text.""" - return cls(parts=[UserPromptPart(user_prompt)], instructions=instructions) + return cls(parts=[UserPromptPart(user_prompt)], instructions=instructions, timestamp=_now_utc()) __repr__ = _utils.dataclasses_no_defaults_repr @@ -1238,9 +1241,10 @@ class ModelResponse: """The name of the model that generated the response.""" timestamp: datetime = field(default_factory=_now_utc) - """The timestamp of the response. + """The timestamp when the response was received locally. - If the model provides a timestamp in the response (as OpenAI does) that will be used. + This is always a high-precision UTC datetime generated locally. Provider-specific timestamps + (if available) are stored in `provider_details['timestamp']`.
""" kind: Literal['response'] = 'response' diff --git a/pydantic_ai_slim/pydantic_ai/models/anthropic.py b/pydantic_ai_slim/pydantic_ai/models/anthropic.py index 28395f56bd..2cc36647c9 100644 --- a/pydantic_ai_slim/pydantic_ai/models/anthropic.py +++ b/pydantic_ai_slim/pydantic_ai/models/anthropic.py @@ -531,6 +531,7 @@ def _process_response(self, response: BetaMessage) -> ModelResponse: parts=items, usage=_map_usage(response, self._provider.name, self._provider.base_url, self._model_name), model_name=response.model, + timestamp=_utils.now_utc(), provider_response_id=response.id, provider_name=self._provider.name, finish_reason=finish_reason, @@ -551,7 +552,6 @@ async def _process_streamed_response( model_request_parameters=model_request_parameters, _model_name=first_chunk.message.model, _response=peekable_response, - _timestamp=_utils.now_utc(), _provider_name=self._provider.name, _provider_url=self._provider.base_url, ) @@ -1113,9 +1113,9 @@ class AnthropicStreamedResponse(StreamedResponse): _model_name: AnthropicModelName _response: AsyncIterable[BetaRawMessageStreamEvent] - _timestamp: datetime _provider_name: str _provider_url: str + _timestamp: datetime = field(default_factory=_utils.now_utc) async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: # noqa: C901 current_block: BetaContentBlock | None = None diff --git a/pydantic_ai_slim/pydantic_ai/models/google.py b/pydantic_ai_slim/pydantic_ai/models/google.py index 8aca93c720..0adaa73ba9 100644 --- a/pydantic_ai_slim/pydantic_ai/models/google.py +++ b/pydantic_ai_slim/pydantic_ai/models/google.py @@ -486,13 +486,17 @@ def _process_response(self, response: GenerateContentResponse) -> ModelResponse: candidate = response.candidates[0] vendor_id = response.response_id - vendor_details: dict[str, Any] | None = None finish_reason: FinishReason | None = None + vendor_details: dict[str, Any] = {} + raw_finish_reason = candidate.finish_reason if raw_finish_reason: # pragma: no branch - vendor_details = {'finish_reason': raw_finish_reason.value} + vendor_details['finish_reason'] = raw_finish_reason.value finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason) + if response.create_time is not None: # pragma: no branch + vendor_details['timestamp'] = response.create_time + if candidate.content is None or candidate.content.parts is None: if finish_reason == 'content_filter' and raw_finish_reason: raise UnexpectedModelBehavior( @@ -510,7 +514,7 @@ def _process_response(self, response: GenerateContentResponse) -> ModelResponse: self._provider.name, usage, vendor_id=vendor_id, - vendor_details=vendor_details, + vendor_details=vendor_details or None, finish_reason=finish_reason, url_context_metadata=candidate.url_context_metadata, ) @@ -528,9 +532,9 @@ async def _process_streamed_response( model_request_parameters=model_request_parameters, _model_name=first_chunk.model_version or self._model_name, _response=peekable_response, - _timestamp=first_chunk.create_time or _utils.now_utc(), _provider_name=self._provider.name, _provider_url=self._provider.base_url, + _provider_timestamp=first_chunk.create_time, ) async def _map_messages( @@ -652,9 +656,10 @@ class GeminiStreamedResponse(StreamedResponse): _model_name: GoogleModelName _response: AsyncIterator[GenerateContentResponse] - _timestamp: datetime _provider_name: str _provider_url: str + _provider_timestamp: datetime | None = None + _timestamp: datetime = field(default_factory=_utils.now_utc) async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: # 
noqa: C901 code_execution_tool_call_id: str | None = None @@ -670,9 +675,15 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: self.provider_response_id = chunk.response_id raw_finish_reason = candidate.finish_reason + provider_details_dict: dict[str, Any] = {} if raw_finish_reason: - self.provider_details = {'finish_reason': raw_finish_reason.value} + provider_details_dict['finish_reason'] = raw_finish_reason.value self.finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason) + if self._provider_timestamp is not None: + # _provider_timestamp is always None in Google streaming cassettes + provider_details_dict['timestamp'] = self._provider_timestamp # pragma: no cover + if provider_details_dict: + self.provider_details = provider_details_dict # Google streams the grounding metadata (including the web search queries and results) # _after_ the text that was generated using it, so it would show up out of order in the stream, @@ -923,6 +934,7 @@ def _process_response_from_parts( return ModelResponse( parts=items, model_name=model_name, + timestamp=_utils.now_utc(), usage=usage, provider_response_id=vendor_id, provider_details=vendor_details, diff --git a/pydantic_ai_slim/pydantic_ai/models/groq.py b/pydantic_ai_slim/pydantic_ai/models/groq.py index 780ee0b305..bc86a0a68b 100644 --- a/pydantic_ai_slim/pydantic_ai/models/groq.py +++ b/pydantic_ai_slim/pydantic_ai/models/groq.py @@ -320,7 +320,6 @@ async def _completions_create( def _process_response(self, response: chat.ChatCompletion) -> ModelResponse: """Process a non-streamed response, and prepare a message to return.""" - timestamp = number_to_datetime(response.created) choice = response.choices[0] items: list[ModelResponsePart] = [] if choice.message.reasoning is not None: @@ -340,13 +339,15 @@ def _process_response(self, response: chat.ChatCompletion) -> ModelResponse: items.append(ToolCallPart(tool_name=c.function.name, args=c.function.arguments, tool_call_id=c.id)) raw_finish_reason = choice.finish_reason - provider_details = {'finish_reason': raw_finish_reason} + provider_details: dict[str, Any] = {'finish_reason': raw_finish_reason} + if response.created: # pragma: no branch + provider_details['timestamp'] = number_to_datetime(response.created) finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason) return ModelResponse( parts=items, usage=_map_usage(response), model_name=response.model, - timestamp=timestamp, + timestamp=_utils.now_utc(), provider_response_id=response.id, provider_name=self._provider.name, finish_reason=finish_reason, @@ -369,8 +370,8 @@ async def _process_streamed_response( _response=peekable_response, _model_name=first_chunk.model, _model_profile=self.profile, - _timestamp=number_to_datetime(first_chunk.created), _provider_name=self._provider.name, + _provider_timestamp=first_chunk.created, ) def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[chat.ChatCompletionToolParam]: @@ -522,8 +523,9 @@ class GroqStreamedResponse(StreamedResponse): _model_name: GroqModelName _model_profile: ModelProfile _response: AsyncIterable[chat.ChatCompletionChunk] - _timestamp: datetime _provider_name: str + _provider_timestamp: int | None = None + _timestamp: datetime = field(default_factory=_utils.now_utc) async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: # noqa: C901 try: @@ -541,9 +543,14 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: except IndexError: continue + provider_details_dict: dict[str, Any] = {} 
if raw_finish_reason := choice.finish_reason: - self.provider_details = {'finish_reason': raw_finish_reason} + provider_details_dict['finish_reason'] = raw_finish_reason self.finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason) + if self._provider_timestamp is not None: # pragma: no branch + provider_details_dict['timestamp'] = number_to_datetime(self._provider_timestamp) + if provider_details_dict: # pragma: no branch + self.provider_details = provider_details_dict if choice.delta.reasoning is not None: if not reasoning: diff --git a/pydantic_ai_slim/pydantic_ai/models/huggingface.py b/pydantic_ai_slim/pydantic_ai/models/huggingface.py index f439b3ccb6..06dd71b7fd 100644 --- a/pydantic_ai_slim/pydantic_ai/models/huggingface.py +++ b/pydantic_ai_slim/pydantic_ai/models/huggingface.py @@ -267,10 +267,7 @@ async def _completions_create( def _process_response(self, response: ChatCompletionOutput) -> ModelResponse: """Process a non-streamed response, and prepare a message to return.""" - if response.created: - timestamp = datetime.fromtimestamp(response.created, tz=timezone.utc) - else: - timestamp = _now_utc() + timestamp = _now_utc() choice = response.choices[0] content = choice.message.content @@ -285,7 +282,9 @@ def _process_response(self, response: ChatCompletionOutput) -> ModelResponse: items.append(ToolCallPart(c.function.name, c.function.arguments, tool_call_id=c.id)) raw_finish_reason = choice.finish_reason - provider_details = {'finish_reason': raw_finish_reason} + provider_details: dict[str, Any] = {'finish_reason': raw_finish_reason} + if response.created: # pragma: no branch + provider_details['timestamp'] = datetime.fromtimestamp(response.created, tz=timezone.utc) finish_reason = _FINISH_REASON_MAP.get(cast(TextGenerationOutputFinishReason, raw_finish_reason), None) return ModelResponse( @@ -315,8 +314,8 @@ async def _process_streamed_response( _model_name=first_chunk.model, _model_profile=self.profile, _response=peekable_response, - _timestamp=datetime.fromtimestamp(first_chunk.created, tz=timezone.utc), _provider_name=self._provider.name, + _provider_timestamp=first_chunk.created, ) def _get_tools(self, model_request_parameters: ModelRequestParameters) -> list[ChatCompletionInputTool]: @@ -463,8 +462,9 @@ class HuggingFaceStreamedResponse(StreamedResponse): _model_name: str _model_profile: ModelProfile _response: AsyncIterable[ChatCompletionStreamOutput] - _timestamp: datetime _provider_name: str + _provider_timestamp: int | None = None + _timestamp: datetime = field(default_factory=_utils.now_utc) async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: async for chunk in self._response: @@ -478,11 +478,16 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: except IndexError: continue + provider_details_dict: dict[str, Any] = {} if raw_finish_reason := choice.finish_reason: - self.provider_details = {'finish_reason': raw_finish_reason} + provider_details_dict['finish_reason'] = raw_finish_reason self.finish_reason = _FINISH_REASON_MAP.get( cast(TextGenerationOutputFinishReason, raw_finish_reason), None ) + if self._provider_timestamp is not None: # pragma: no branch + provider_details_dict['timestamp'] = datetime.fromtimestamp(self._provider_timestamp, tz=timezone.utc) + if provider_details_dict: # pragma: no branch + self.provider_details = provider_details_dict # Handle the text part of the response content = choice.delta.content diff --git a/pydantic_ai_slim/pydantic_ai/models/mistral.py 
b/pydantic_ai_slim/pydantic_ai/models/mistral.py index 2a3752c370..c9747e7dba 100644 --- a/pydantic_ai_slim/pydantic_ai/models/mistral.py +++ b/pydantic_ai_slim/pydantic_ai/models/mistral.py @@ -348,10 +348,7 @@ def _process_response(self, response: MistralChatCompletionResponse) -> ModelRes """Process a non-streamed response, and prepare a message to return.""" assert response.choices, 'Unexpected empty response choice.' - if response.created: - timestamp = number_to_datetime(response.created) - else: - timestamp = _now_utc() + timestamp = _now_utc() choice = response.choices[0] content = choice.message.content @@ -370,7 +367,9 @@ def _process_response(self, response: MistralChatCompletionResponse) -> ModelRes parts.append(tool) raw_finish_reason = choice.finish_reason - provider_details = {'finish_reason': raw_finish_reason} + provider_details: dict[str, Any] = {'finish_reason': raw_finish_reason} + if response.created: # pragma: no branch + provider_details['timestamp'] = number_to_datetime(response.created) finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason) return ModelResponse( @@ -397,17 +396,12 @@ async def _process_streamed_response( 'Streamed response ended without content or tool calls' ) - if first_chunk.data.created: - timestamp = number_to_datetime(first_chunk.data.created) - else: - timestamp = _now_utc() - return MistralStreamedResponse( model_request_parameters=model_request_parameters, _response=peekable_response, _model_name=first_chunk.data.model, - _timestamp=timestamp, _provider_name=self._provider.name, + _provider_timestamp=first_chunk.data.created, ) @staticmethod @@ -613,8 +607,9 @@ class MistralStreamedResponse(StreamedResponse): _model_name: MistralModelName _response: AsyncIterable[MistralCompletionEvent] - _timestamp: datetime _provider_name: str + _provider_timestamp: int | None = None + _timestamp: datetime = field(default_factory=_now_utc) _delta_content: str = field(default='', init=False) @@ -631,9 +626,14 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: except IndexError: continue + provider_details_dict: dict[str, Any] = {} if raw_finish_reason := choice.finish_reason: - self.provider_details = {'finish_reason': raw_finish_reason} + provider_details_dict['finish_reason'] = raw_finish_reason self.finish_reason = _FINISH_REASON_MAP.get(raw_finish_reason) + if self._provider_timestamp is not None: # pragma: no branch + provider_details_dict['timestamp'] = number_to_datetime(self._provider_timestamp) + if provider_details_dict: # pragma: no branch + self.provider_details = provider_details_dict # Handle the text part of the response content = choice.delta.content diff --git a/pydantic_ai_slim/pydantic_ai/models/openai.py b/pydantic_ai_slim/pydantic_ai/models/openai.py index a37be4f024..197db7599c 100644 --- a/pydantic_ai_slim/pydantic_ai/models/openai.py +++ b/pydantic_ai_slim/pydantic_ai/models/openai.py @@ -567,7 +567,7 @@ def _validate_completion(self, response: chat.ChatCompletion) -> chat.ChatComple """ return chat.ChatCompletion.model_validate(response.model_dump()) - def _process_provider_details(self, response: chat.ChatCompletion) -> dict[str, Any]: + def _process_provider_details(self, response: chat.ChatCompletion) -> dict[str, Any] | None: """Hook that response content to provider details. This method may be overridden by subclasses of `OpenAIChatModel` to apply custom mappings. 
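The widened `dict[str, Any] | None` return type matches a pattern this diff applies across the model classes: optional provider-reported fields are accumulated in a plain dict, and an empty dict is normalized to `None`. A minimal self-contained sketch of that pattern, with an illustrative function name and parameters that are not part of this diff:

from datetime import datetime, timezone
from typing import Any


def build_provider_details(raw_finish_reason: str | None, created: int | None) -> dict[str, Any] | None:
    # Accumulate whatever the provider actually reported...
    details: dict[str, Any] = {}
    if raw_finish_reason:
        details['finish_reason'] = raw_finish_reason
    if created:
        # Providers report creation time as a Unix timestamp in seconds.
        details['timestamp'] = datetime.fromtimestamp(created, tz=timezone.utc)
    # ...then collapse an empty dict to None so "no details" stays falsy for callers.
    return details or None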
@@ -585,10 +585,8 @@ def _process_response(self, response: chat.ChatCompletion | str) -> ModelRespons f'Invalid response from {self.system} chat completions endpoint, expected JSON data' ) - if response.created: - timestamp = number_to_datetime(response.created) - else: - timestamp = _now_utc() + timestamp = _now_utc() + if not response.created: response.created = int(timestamp.timestamp()) # Workaround for local Ollama which sometimes returns a `None` finish reason. @@ -624,12 +622,18 @@ def _process_response(self, response: chat.ChatCompletion | str) -> ModelRespons part.tool_call_id = _guard_tool_call_id(part) items.append(part) + provider_details = self._process_provider_details(response) + if response.created: # pragma: no branch + if provider_details is None: + provider_details = {} + provider_details['timestamp'] = number_to_datetime(response.created) + return ModelResponse( parts=items, usage=self._map_usage(response), model_name=response.model, timestamp=timestamp, - provider_details=self._process_provider_details(response), + provider_details=provider_details or None, provider_response_id=response.id, provider_name=self._provider.name, finish_reason=self._map_finish_reason(choice.finish_reason), @@ -683,9 +687,9 @@ async def _process_streamed_response( _model_name=model_name, _model_profile=self.profile, _response=peekable_response, - _timestamp=number_to_datetime(first_chunk.created), _provider_name=self._provider.name, _provider_url=self._provider.base_url, + _provider_timestamp=first_chunk.created, ) @property @@ -1149,14 +1153,16 @@ def _process_response( # noqa: C901 self, response: responses.Response, model_request_parameters: ModelRequestParameters ) -> ModelResponse: """Process a non-streamed response, and prepare a message to return.""" - timestamp = number_to_datetime(response.created_at) + timestamp = _now_utc() items: list[ModelResponsePart] = [] for item in response.output: if isinstance(item, responses.ResponseReasoningItem): signature = item.encrypted_content # Handle raw CoT content from gpt-oss models + provider_details: dict[str, Any] = {} raw_content: list[str] | None = [c.text for c in item.content] if item.content else None - provider_details: dict[str, Any] | None = {'raw_content': raw_content} if raw_content else None + if raw_content: + provider_details['raw_content'] = raw_content if item.summary: for summary in item.summary: @@ -1167,7 +1173,7 @@ def _process_response( # noqa: C901 id=item.id, signature=signature, provider_name=self.system if (signature or provider_details) else None, - provider_details=provider_details, + provider_details=provider_details or None, ) ) # We only need to store the signature and raw_content once. 
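Since `ModelResponse.timestamp` is now always the local receipt time, callers that previously relied on it carrying the provider's `created` time should read `provider_details` instead. A hedged usage sketch, where `response_times` is an illustrative helper rather than part of this diff:

from datetime import datetime

from pydantic_ai.messages import ModelResponse


def response_times(response: ModelResponse) -> tuple[datetime, datetime | None]:
    local = response.timestamp  # always set locally, high precision
    # The provider-reported creation time, when available, now lives here:
    provider = (response.provider_details or {}).get('timestamp')
    return local, provider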
@@ -1180,7 +1186,7 @@ def _process_response( # noqa: C901 id=item.id, signature=signature, provider_name=self.system if (signature or provider_details) else None, - provider_details=provider_details, + provider_details=provider_details or None, ) ) elif isinstance(item, responses.ResponseOutputMessage): @@ -1240,11 +1246,13 @@ def _process_response( # noqa: C901 pass finish_reason: FinishReason | None = None - provider_details: dict[str, Any] | None = None + provider_details: dict[str, Any] = {} raw_finish_reason = details.reason if (details := response.incomplete_details) else response.status if raw_finish_reason: - provider_details = {'finish_reason': raw_finish_reason} + provider_details['finish_reason'] = raw_finish_reason finish_reason = _RESPONSES_FINISH_REASON_MAP.get(raw_finish_reason) + if response.created_at: # pragma: no branch + provider_details['timestamp'] = number_to_datetime(response.created_at) return ModelResponse( parts=items, @@ -1254,7 +1262,7 @@ def _process_response( # noqa: C901 timestamp=timestamp, provider_name=self._provider.name, finish_reason=finish_reason, - provider_details=provider_details, + provider_details=provider_details or None, ) async def _process_streamed_response( @@ -1273,9 +1281,10 @@ async def _process_streamed_response( model_request_parameters=model_request_parameters, _model_name=first_chunk.response.model, _response=peekable_response, - _timestamp=number_to_datetime(first_chunk.response.created_at), _provider_name=self._provider.name, _provider_url=self._provider.base_url, + # type of created_at is float but it's actually a Unix timestamp in seconds + _provider_timestamp=int(first_chunk.response.created_at), ) @overload @@ -1879,9 +1888,10 @@ class OpenAIStreamedResponse(StreamedResponse): _model_name: OpenAIModelName _model_profile: ModelProfile _response: AsyncIterable[ChatCompletionChunk] - _timestamp: datetime _provider_name: str _provider_url: str + _provider_timestamp: int | None = None + _timestamp: datetime = field(default_factory=_now_utc) async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: async for chunk in self._validate_response(): @@ -1905,7 +1915,7 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: if raw_finish_reason := choice.finish_reason: self.finish_reason = self._map_finish_reason(raw_finish_reason) - if provider_details := self._map_provider_details(chunk): + if provider_details := self._map_provider_details(chunk): # pragma: no branch self.provider_details = provider_details for event in self._map_part_delta(choice): @@ -1998,7 +2008,12 @@ def _map_provider_details(self, chunk: ChatCompletionChunk) -> dict[str, Any] | This method may be overridden by subclasses of `OpenAIStreamResponse` to customize the provider details. 
""" - return _map_provider_details(chunk.choices[0]) + provider_details = _map_provider_details(chunk.choices[0]) + if self._provider_timestamp is not None: # pragma: no branch + if provider_details is None: + provider_details = {} + provider_details['timestamp'] = number_to_datetime(self._provider_timestamp) + return provider_details or None def _map_usage(self, response: ChatCompletionChunk) -> usage.RequestUsage: return _map_usage(response, self._provider_name, self._provider_url, self.model_name) @@ -2034,9 +2049,10 @@ class OpenAIResponsesStreamedResponse(StreamedResponse): _model_name: OpenAIModelName _response: AsyncIterable[responses.ResponseStreamEvent] - _timestamp: datetime _provider_name: str _provider_url: str + _provider_timestamp: int | None = None + _timestamp: datetime = field(default_factory=_now_utc) async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: # noqa: C901 async for chunk in self._response: @@ -2047,9 +2063,13 @@ async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: raw_finish_reason = ( details.reason if (details := chunk.response.incomplete_details) else chunk.response.status ) + provider_details: dict[str, Any] = {} if raw_finish_reason: # pragma: no branch - self.provider_details = {'finish_reason': raw_finish_reason} + provider_details['finish_reason'] = raw_finish_reason self.finish_reason = _RESPONSES_FINISH_REASON_MAP.get(raw_finish_reason) + if self._provider_timestamp is not None: # pragma: no branch + provider_details['timestamp'] = number_to_datetime(self._provider_timestamp) + self.provider_details = provider_details or None elif isinstance(chunk, responses.ResponseContentPartAddedEvent): pass # there's nothing we need to do here @@ -2439,7 +2459,7 @@ def _map_usage( def _map_provider_details( choice: chat_completion_chunk.Choice | chat_completion.Choice, -) -> dict[str, Any]: +) -> dict[str, Any] | None: provider_details: dict[str, Any] = {} # Add logprobs to vendor_details if available @@ -2448,7 +2468,7 @@ def _map_provider_details( if raw_finish_reason := choice.finish_reason: provider_details['finish_reason'] = raw_finish_reason - return provider_details + return provider_details or None def _split_combined_tool_call_id(combined_id: str) -> tuple[str, str | None]: diff --git a/pydantic_ai_slim/pydantic_ai/models/openrouter.py b/pydantic_ai_slim/pydantic_ai/models/openrouter.py index fd3d7dfce4..18bfa358e3 100644 --- a/pydantic_ai_slim/pydantic_ai/models/openrouter.py +++ b/pydantic_ai_slim/pydantic_ai/models/openrouter.py @@ -453,7 +453,8 @@ def _map_openrouter_provider_details( provider_details: dict[str, Any] = {} provider_details['downstream_provider'] = response.provider - provider_details['finish_reason'] = response.choices[0].native_finish_reason + if native_finish_reason := response.choices[0].native_finish_reason: + provider_details['finish_reason'] = native_finish_reason if usage := response.usage: if cost := usage.cost: @@ -547,12 +548,13 @@ def _process_thinking(self, message: chat.ChatCompletionMessage) -> list[Thinkin return super()._process_thinking(message) @override - def _process_provider_details(self, response: chat.ChatCompletion) -> dict[str, Any]: + def _process_provider_details(self, response: chat.ChatCompletion) -> dict[str, Any] | None: assert isinstance(response, _OpenRouterChatCompletion) provider_details = super()._process_provider_details(response) - provider_details.update(_map_openrouter_provider_details(response)) - return provider_details + if 
openrouter_details := _map_openrouter_provider_details(response): + provider_details = {**(provider_details or {}), **openrouter_details} + return provider_details or None @dataclass class _MapModelResponseContext(OpenAIChatModel._MapModelResponseContext): # type: ignore[reportPrivateUsage] @@ -665,8 +667,16 @@ def _map_thinking_delta(self, choice: chat_completion_chunk.Choice) -> Iterable[ def _map_provider_details(self, chunk: chat.ChatCompletionChunk) -> dict[str, Any] | None: assert isinstance(chunk, _OpenRouterChatCompletionChunk) - if provider_details := super()._map_provider_details(chunk): + if provider_details := super()._map_provider_details(chunk): # pragma: no branch provider_details.update(_map_openrouter_provider_details(chunk)) + # Preserve finish_reason from previous chunk if the current chunk doesn't have one. + # After the chunk with native_finish_reason 'completed', OpenRouter sends one more + # chunk with usage data (see cassette test_openrouter_stream_with_native_options.yaml) + # which has native_finish_reason: null. Since provider_details is replaced on each + # chunk, we need to carry forward the finish_reason from the previous chunk. + if 'finish_reason' not in provider_details and self.provider_details: # pragma: no branch + if previous_finish_reason := self.provider_details.get('finish_reason'): + provider_details['finish_reason'] = previous_finish_reason return provider_details @override diff --git a/pydantic_ai_slim/pydantic_ai/models/outlines.py b/pydantic_ai_slim/pydantic_ai/models/outlines.py index 34efece6fa..4354cd8d1f 100644 --- a/pydantic_ai_slim/pydantic_ai/models/outlines.py +++ b/pydantic_ai_slim/pydantic_ai/models/outlines.py @@ -8,8 +8,8 @@ import io from collections.abc import AsyncIterable, AsyncIterator, Sequence from contextlib import asynccontextmanager -from dataclasses import dataclass, replace -from datetime import datetime, timezone +from dataclasses import dataclass, field, replace +from datetime import datetime from typing import TYPE_CHECKING, Any, Literal, cast from typing_extensions import assert_never @@ -505,6 +505,7 @@ def _process_response(self, response: str) -> ModelResponse: parts=cast( list[ModelResponsePart], split_content_into_text_and_thinking(response, self.profile.thinking_tags) ), + timestamp=_utils.now_utc(), ) async def _process_streamed_response( @@ -516,13 +517,11 @@ async def _process_streamed_response( if isinstance(first_chunk, _utils.Unset): # pragma: no cover raise UnexpectedModelBehavior('Streamed response ended without content or tool calls') - timestamp = datetime.now(tz=timezone.utc) return OutlinesStreamedResponse( model_request_parameters=model_request_parameters, _model_name=self._model_name, _model_profile=self.profile, _response=peekable_response, - _timestamp=timestamp, _provider_name='outlines', ) @@ -542,8 +541,8 @@ class OutlinesStreamedResponse(StreamedResponse): _model_name: str _model_profile: ModelProfile _response: AsyncIterable[str] - _timestamp: datetime _provider_name: str + _timestamp: datetime = field(default_factory=_utils.now_utc) async def _get_event_iterator(self) -> AsyncIterator[ModelResponseStreamEvent]: async for content in self._response: diff --git a/pydantic_ai_slim/pydantic_ai/run.py b/pydantic_ai_slim/pydantic_ai/run.py index 0ed3e2455d..81529856f8 100644 --- a/pydantic_ai_slim/pydantic_ai/run.py +++ b/pydantic_ai_slim/pydantic_ai/run.py @@ -64,6 +64,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), @@ -243,6 
+244,7 @@ async def main(): timestamp=datetime.datetime(...), ) ], + timestamp=datetime.datetime(...), run_id='...', ) ), diff --git a/pydantic_ai_slim/pydantic_ai/ui/_messages_builder.py b/pydantic_ai_slim/pydantic_ai/ui/_messages_builder.py index 6a2edf1715..5d9207c318 100644 --- a/pydantic_ai_slim/pydantic_ai/ui/_messages_builder.py +++ b/pydantic_ai_slim/pydantic_ai/ui/_messages_builder.py @@ -1,7 +1,7 @@ from dataclasses import dataclass, field from typing import cast -from pydantic_ai._utils import get_union_args +from pydantic_ai._utils import get_union_args, now_utc as _now_utc from pydantic_ai.messages import ModelMessage, ModelRequest, ModelRequestPart, ModelResponse, ModelResponsePart @@ -19,7 +19,7 @@ def add(self, part: ModelRequestPart | ModelResponsePart) -> None: if isinstance(last_message, ModelRequest): last_message.parts = [*last_message.parts, part] else: - self.messages.append(ModelRequest(parts=[part])) + self.messages.append(ModelRequest(parts=[part], timestamp=_now_utc())) else: part = cast(ModelResponsePart, part) if isinstance(last_message, ModelResponse): diff --git a/tests/models/test_anthropic.py b/tests/models/test_anthropic.py index 2df96b0278..b75e82fc3c 100644 --- a/tests/models/test_anthropic.py +++ b/tests/models/test_anthropic.py @@ -5,7 +5,7 @@ import re from collections.abc import Callable, Sequence from dataclasses import dataclass, field -from datetime import timezone +from datetime import datetime, timezone from decimal import Decimal from functools import cached_property from typing import Annotated, Any, TypeVar, cast @@ -239,6 +239,7 @@ async def test_sync_request_text_response(allow_model_requests: None): [ ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -254,6 +255,7 @@ async def test_sync_request_text_response(allow_model_requests: None): ), ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1054,6 +1056,7 @@ async def test_request_structured_response(allow_model_requests: None): [ ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1082,6 +1085,7 @@ async def test_request_structured_response(allow_model_requests: None): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -1124,6 +1128,7 @@ async def get_location(loc_name: str) -> str: SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1152,6 +1157,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1180,6 +1186,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1534,6 +1541,7 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1575,6 +1583,7 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1734,6 +1743,7 @@ def 
simple_instructions(): [ ModelRequest( parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -1771,6 +1781,7 @@ async def test_anthropic_model_thinking_part(allow_model_requests: None, anthrop [ ModelRequest( parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1828,6 +1839,7 @@ async def test_anthropic_model_thinking_part(allow_model_requests: None, anthrop timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1890,6 +1902,7 @@ async def test_anthropic_model_thinking_part_redacted(allow_model_requests: None timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1936,6 +1949,7 @@ async def test_anthropic_model_thinking_part_redacted(allow_model_requests: None timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1995,6 +2009,7 @@ async def test_anthropic_model_thinking_part_redacted_stream(allow_model_request timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2138,6 +2153,7 @@ async def test_anthropic_model_thinking_part_from_other_model( timestamp=IsDatetime(), ), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2148,33 +2164,21 @@ async def test_anthropic_model_thinking_part_from_other_model( signature=IsStr(), provider_name='openai', ), - ThinkingPart( - content=IsStr(), - id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7', - ), + ThinkingPart(content=IsStr(), id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7'), + ThinkingPart(content=IsStr(), id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7'), + ThinkingPart(content=IsStr(), id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7'), + ThinkingPart(content=IsStr(), id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7'), + ThinkingPart(content=IsStr(), id='rs_68c1fda7b4d481a1a65f48aef6a6b85e06da9901a3d98ab7'), TextPart(content=IsStr(), id='msg_68c1fdbecbf081a18085a084257a9aef06da9901a3d98ab7'), ], usage=RequestUsage(input_tokens=23, output_tokens=2211, details={'reasoning_tokens': 1920}), model_name='gpt-5-2025-08-07', timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'completed'}, + provider_details={ + 'finish_reason': 'completed', + 'timestamp': datetime(2025, 9, 10, 22, 37, 27, tzinfo=timezone.utc), + }, provider_response_id='resp_68c1fda6f11081a1b9fa80ae9122743506da9901a3d98ab7', finish_reason='stop', run_id=IsStr(), @@ -2200,6 +2204,7 @@ async def test_anthropic_model_thinking_part_from_other_model( timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2256,6 +2261,7 @@ async def test_anthropic_model_thinking_part_stream(allow_model_requests: None, timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2640,7 +2646,7 @@ async 
def test_anthropic_model_empty_message_on_history(allow_model_requests: No result = await agent.run( 'I need a potato!', message_history=[ - ModelRequest(parts=[], instructions='You are a helpful assistant.', kind='request'), + ModelRequest(parts=[], instructions='You are a helpful assistant.', kind='request', timestamp=IsDatetime()), ModelResponse(parts=[TextPart(content='Hello, how can I help you?')], kind='response'), ], ) @@ -2666,6 +2672,7 @@ async def test_anthropic_web_search_tool(allow_model_requests: None, anthropic_a [ ModelRequest( parts=[UserPromptPart(content='What is the weather in San Francisco today?', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2874,6 +2881,7 @@ async def test_anthropic_web_search_tool(allow_model_requests: None, anthropic_a timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -3087,6 +3095,7 @@ async def test_anthropic_model_web_search_tool_stream(allow_model_requests: None timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4133,6 +4142,7 @@ async def test_anthropic_web_fetch_tool(allow_model_requests: None, anthropic_ap timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4214,6 +4224,7 @@ async def test_anthropic_web_fetch_tool(allow_model_requests: None, anthropic_ap timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4283,6 +4294,7 @@ async def test_anthropic_web_fetch_tool(allow_model_requests: None, anthropic_ap timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4379,6 +4391,7 @@ async def test_anthropic_web_fetch_tool_stream( timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -4839,7 +4852,7 @@ async def test_anthropic_web_fetch_tool_message_replay(): # Create message history with BuiltinToolCallPart and BuiltinToolReturnPart messages = [ - ModelRequest(parts=[UserPromptPart(content='Test')]), + ModelRequest(parts=[UserPromptPart(content='Test')], timestamp=IsDatetime()), ModelResponse( parts=[ BuiltinToolCallPart( @@ -4993,6 +5006,7 @@ async def test_anthropic_mcp_servers(allow_model_requests: None, anthropic_api_k timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5079,6 +5093,7 @@ async def test_anthropic_mcp_servers(allow_model_requests: None, anthropic_api_k timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5244,6 +5259,7 @@ async def test_anthropic_mcp_servers_stream(allow_model_requests: None, anthropi timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -5506,6 +5522,7 @@ async def test_anthropic_code_execution_tool(allow_model_requests: None, anthrop [ ModelRequest( parts=[UserPromptPart(content='How much is 3 * 12390?', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), instructions='Always use the code execution tool for math.', run_id=IsStr(), ), @@ -5573,6 +5590,7 @@ async def test_anthropic_code_execution_tool(allow_model_requests: None, anthrop timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), instructions='Always use the code execution tool for math.', run_id=IsStr(), ), @@ -5654,6 +5672,7 @@ async def test_anthropic_code_execution_tool_stream(allow_model_requests: None, timestamp=IsDatetime(), ) ], + 
timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6238,6 +6257,7 @@ async def test_anthropic_server_tool_pass_history_to_another_provider( [ ModelRequest( parts=[UserPromptPart(content='What day is tomorrow?', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6251,7 +6271,10 @@ async def test_anthropic_server_tool_pass_history_to_another_provider( model_name='gpt-4.1-2025-04-14', timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'completed'}, + provider_details={ + 'finish_reason': 'completed', + 'timestamp': datetime(2025, 11, 19, 23, 41, 8, tzinfo=timezone.utc), + }, provider_response_id='resp_0dcd74f01910b54500691e5594957481a0ac36dde76eca939f', finish_reason='stop', run_id=IsStr(), @@ -6295,14 +6318,16 @@ async def test_anthropic_empty_content_filtering(env: TestEnv): # Test _map_message with empty string in user prompt messages_empty_string: list[ModelMessage] = [ - ModelRequest(parts=[UserPromptPart(content='')], kind='request'), + ModelRequest(parts=[UserPromptPart(content='')], kind='request', timestamp=IsDatetime()), ] _, anthropic_messages = await model._map_message(messages_empty_string, ModelRequestParameters(), {}) # type: ignore[attr-defined] assert anthropic_messages == snapshot([]) # Empty content should be filtered out # Test _map_message with list containing empty strings in user prompt messages_mixed_content: list[ModelMessage] = [ - ModelRequest(parts=[UserPromptPart(content=['', 'Hello', '', 'World'])], kind='request'), + ModelRequest( + parts=[UserPromptPart(content=['', 'Hello', '', 'World'])], kind='request', timestamp=IsDatetime() + ), ] _, anthropic_messages = await model._map_message(messages_mixed_content, ModelRequestParameters(), {}) # type: ignore[attr-defined] assert anthropic_messages == snapshot( @@ -6311,9 +6336,9 @@ async def test_anthropic_empty_content_filtering(env: TestEnv): # Test _map_message with empty assistant response messages: list[ModelMessage] = [ - ModelRequest(parts=[SystemPromptPart(content='You are helpful')], kind='request'), + ModelRequest(parts=[SystemPromptPart(content='You are helpful')], kind='request', timestamp=IsDatetime()), ModelResponse(parts=[TextPart(content='')], kind='response'), # Empty response - ModelRequest(parts=[UserPromptPart(content='Hello')], kind='request'), + ModelRequest(parts=[UserPromptPart(content='Hello')], kind='request', timestamp=IsDatetime()), ] _, anthropic_messages = await model._map_message(messages, ModelRequestParameters(), {}) # type: ignore[attr-defined] # The empty assistant message should be filtered out @@ -6352,6 +6377,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6385,6 +6411,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6422,6 +6449,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -6456,6 +6484,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6492,6 +6521,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6550,6 +6580,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), 
run_id=IsStr(), ), ModelResponse( @@ -6583,6 +6614,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -6634,6 +6666,7 @@ class CountryLanguage(BaseModel): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( diff --git a/tests/models/test_bedrock.py b/tests/models/test_bedrock.py index f13aaff4fb..ff2eb7e51d 100644 --- a/tests/models/test_bedrock.py +++ b/tests/models/test_bedrock.py @@ -130,6 +130,7 @@ async def test_bedrock_model(allow_model_requests: None, bedrock_provider: Bedro timestamp=IsDatetime(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -304,6 +305,7 @@ async def temperature(city: str, date: datetime.date) -> str: timestamp=IsDatetime(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -334,6 +336,7 @@ async def temperature(city: str, date: datetime.date) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -364,6 +367,7 @@ async def temperature(city: str, date: datetime.date) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -440,6 +444,7 @@ async def get_capital(country: str) -> str: timestamp=IsDatetime(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -470,6 +475,7 @@ async def get_capital(country: str) -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -763,6 +769,7 @@ def instructions() -> str: [ ModelRequest( parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())], + timestamp=IsDatetime(), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -804,9 +811,14 @@ async def test_bedrock_multiple_documents_in_history( result = await agent.run( 'What is in the documents?', message_history=[ - ModelRequest(parts=[UserPromptPart(content=['Here is a PDF document: ', document_content])]), + ModelRequest( + parts=[UserPromptPart(content=['Here is a PDF document: ', document_content])], timestamp=IsDatetime() + ), ModelResponse(parts=[TextPart(content='foo bar')]), - ModelRequest(parts=[UserPromptPart(content=['Here is another PDF document: ', document_content])]), + ModelRequest( + parts=[UserPromptPart(content=['Here is another PDF document: ', document_content])], + timestamp=IsDatetime(), + ), ModelResponse(parts=[TextPart(content='foo bar 2')]), ], ) @@ -825,6 +837,7 @@ async def test_bedrock_model_thinking_part_deepseek(allow_model_requests: None, [ ModelRequest( parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -853,6 +866,7 @@ async def test_bedrock_model_thinking_part_deepseek(allow_model_requests: None, timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -886,6 +900,7 @@ async def test_bedrock_model_thinking_part_anthropic(allow_model_requests: None, [ ModelRequest( parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -921,6 +936,7 @@ async def test_bedrock_model_thinking_part_anthropic(allow_model_requests: None, timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -968,6 +984,7 @@ async def test_bedrock_model_thinking_part_redacted(allow_model_requests: None, timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), 
run_id=IsStr(), ), ModelResponse( @@ -1004,6 +1021,7 @@ async def test_bedrock_model_thinking_part_redacted(allow_model_requests: None, timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1062,6 +1080,7 @@ async def test_bedrock_model_thinking_part_redacted_stream( timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1196,6 +1215,7 @@ async def test_bedrock_model_thinking_part_from_other_model( timestamp=IsDatetime(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1203,32 +1223,23 @@ async def test_bedrock_model_thinking_part_from_other_model( ThinkingPart( content=IsStr(), id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27', - signature='gAAAAABowgAKxFTo-oXVZ9WpxX1o2XmQkqXqGTeqSbHjr1hsNXhe0QDBXDnKBMrBVbYympkJVMbAIsYJuZ8P3-DmXZVwYJR_F1cfpCbt97TxVSbG7WIbUp-H1vYpN3oA2-hlP-G76YzOGJzHQy1bWWluUC4GsPP194NpVANRnTUBQakfwhOgk9WE2Op7SyzfdHxYV5vpRPcrXRMrLZYZFUXM6D6ROZljjaZKNj9KaluIOdiTZydQnKVyZs0ffjIpNe6Cn9jJNAUH-cxKfOJ3fmUVN213tTr-PveUkAdlYwCRdtq_IlrFrr1gp6hiMgtdQXxSdtjPuoMfQEZTsI-FiAGFipYDrN5Gu_YXlqX1Lmzbb2famCXTYp6bWljYT14pCSMA-OZrJWsgj4tSahyZIgNq_E_cvHnQ-iJo1ACH0Jt22soOFBhAhSG8rLOG8O5ZkmF7sGUr1MbP56LLkz29NPgh98Zsyxp4tM33QH5XPrMC7MOfTvzj8TyhRH31CWHScQl3AJq1o3z2K3qgl6spkmWIwWLjbo4DBzFz6-wRPBm5Fv60hct1oFuYjXL-ntOBASLOAES7U3Cvb56VPex7JdmTyzb-XP7jNhYzWK-69HgGZaMhOJJmLGZhu8Xp9P6GPnXiQpyL5LvcX_FEiR6CzpkhhS54IryQx2UW7VadUMnpvwEUwtT2c9xoh6WEwt2kTDj65DyzRwFdcms3WG_B1cSe5iwBN1JAQm3ay04dSG-a5JNVqFyaW7r1NcVts3HWC2c-S9Z_Xjse548XftM_aD97KTqoiR5GxU95geXvrWI8szDSYSueSGCTI8L7bCDO-iKE4RQEmyS8ZbqMSWyQgClVQOR5CF3jPKb6hP7ofoQlPRuMyMY8AqyWGeY9bbWb-LjrSDpRTAR6af8Ip5JYr4rlcG1YqEWYT-MqiCPw3ZJqBXUICSpz9ZHQNTrYIzkJZqPg-hCqvFkOCUtvOYSDtGkAe9x1ekPqlV0IuWLxAmjqbkGH0QCaYAF90wVQUgWPkVWfQ6ULRz2sveQDZf0P8rVZw6ATEvZVnkml6VDbaH69lMyvzls7suvEZJxS5osyjrGfkt6L4nsvhZS7Nuxj2TcRxSEXxo5kULEqAO85Ivsm4j7R1Cxb2h8I4ZZZ_-DnkbWsgd7DELMI-CYtpAWLFl4K4VaMBT6mNAuud545BemUlWnQgmrde4aS7Q_W5GP11iQea9_JcJr6DMf4Y40NDr_fPVU5p7q1bnc1xtwkIpyx0uEeXHEZDR8k-5apBXScJtmelzpiy-25oJdSU5xtgVPrb77kVyJofPtujplZoqMh6MOqTdIhIMm_Goy_Wne4W39hVI01b2vwduBaCCaX6M8uACX96s454WPitX4MYAVc65UHF0BTFskEcbY5bFZpzcWb39VTfra-Ru2URvdo_66qmUd-03XzLKiMsqJHGclhaU6XBqaIo9qD8FjLVT9DOx56eh3GFvYA1dxvgbp6gyOg7bOBL0KDarT9Vmo40vGvwyCT_a2S_6Oki6uBU_3bf-jGvtum4tkN--wZkBrhOj7L8onItPoAZQXjYXrcXfVC1KR_xA0IOxYZD59G1rBxTDlvatIFwhvoISinkU-zPkKMpralHlxDicmJrBsKsy-mZWCF5qHeWF36pjE35dE9GxR28xw1Ed0pA_kOIgMKSKCiRWUYY8D1jAHKzimnj_4VTKR05kTp30pasr0IUMl2celsQMDv1D2atEJ_65CeRio5cnNGUR_Z73LJ-fqLkSjSxlE2YvtcKX7bdF6bSq3EqDtOdLVUjYl_pxRaUNMRmahQUJXGsDx7X-W9xUgQmAq09qT3lh1fhVUgdtUuuaoNY_M1s5V0E5ePuu_C6Duuz8WCcecbhrcbI3FDQSJn_XHK6ImLMYBowGRYVkBE_Rf7q7Hj4zdF-3bVE_QDce3syZNshCYK5kO8mvADptgdNVG7lEiZ9TIQPBd-XWRUrZ3XvIfGVJFVMjh_Laq8RTDyvvId7iQDuvq89hQ86hlfWteEl8HzuwpakWnogg3CCStX5CMGpYUWWkOCUu2LCH2H4EBaeCcAPLCmEoxcpKS182kYLm8-4ShRz-YOMIEmE9TL2za15I6BCBi9OhQGcLSl4BquhfBVHyxmkEN7_g102yI1Ocucux8q_HLMo5UZz0KALRQy4qmNpnLg9f4Yetj6msezjuU17Ji1ofIcadglOYy2J3Aswf58M9fCwCfB6hAHRYM2XkYzJ3nc0VosWA0er90zqKOeM1-erWC-skbupO-8nw9DA5OtnJTZOLnhGRjzXqna0E5R69wOHi3yvb3zzv2K9fLMKi11bCM_cnel9ItcFM-AYQ0AhBTZ3sTn-tpIf3IVNCvnCxMWvbO-MBmoexQnPorA0SL6n_nL49Y9Zb7UgwCyNGmhsFjIlSXu-YG-yCV1lVXBYoEPDwa2eCaMwph0QneXPHHMUs_i9PuFVI-nwfEiwU0b4tk8x3tWdkltvtzhjB8fxQxJNrk-ykNhuEYfQMQ0_MCqIRD097_gjO8q-eFUjnuiVqqoQ9_rH9QCxABdA8afoNt0hFxBwR6d57P81_XKOnApyrPx0DjsuKVTBFoCWccKX4DZuQT_PhmsFtPquNp6OPWQM5a8HzKntjz_HgFYnyS5p6n0hBGZVC_GDtFEm8JELcwuVoSLSXhI_XKnck2FIhHA5YQ4vLGOhCEEZoINkDdq3oNgm-NiP-DpG2LYetLl4ljlUpRBUizmWn4Fr3jhIt8rmQwqmFj6aMDSEM0Sgen9DsUH7H3uGK2Nipv
Fv2Uxic5aXAKQ37EFjxPFqvKXlDl-hLnUXtkXLXBbmgCJJw6nBvm-SeIxU_eKnWHkhtdnkNZrmNFaq0OYZKk-moYSxEgzxasQNYGtkN89LqAhRTS6dIbb4nXa8ArvuHTJ_qpLFjGF3SSX98Y53cgtSdGTTmHQ6_v0BmeKCWhRd83vPrmFosif57AXyBVk0HJ5YdeueitsBCyXcJmeCntrT4zDlujwuMWK7wDO4vGMj3nIIyuJMJjtpD_auuDLmpYHqmKTHm8Ob8R2jJIwDhJIupkTldX5kHZmo6Nyh8tjeMgeEbp4Tp05CfyUTWWM16gaGkwW2Gto3sJtv0AiA_PzSN_dDziD5fRSH2Q2JTW4g03Uc9SBelL2fFiQifPSc3-mI4i8QHIswd_qPnSAnHxBW6SLJFqY-qIG6soLzt2VnH5hpVvakMfO27A82DQrcoFDFsqRb8KgLEoL5u-6NbgwKSNFjfIrLFg9IzrQI7oktylkFrc_EWL_smmL6iuT5WEYt4jBwtMvyDD6nVHzzx7jd8J3XQqjXfWuH_uTAX6cOHprzaPn05QRAluZgcBL-FSQJ3Qw7PjpoiLyd3DGL77nfl_m9cpAnpz3ojtajP7Gb-aq_xa_JIqxbnuBDBkeyN8pOQp--ZD7T2BOAgS7poVoqPFXRYIJOwKtOcrj6UdPN2yrx-44ZMTJYzwcGELnFRs32PKx8TiiF1pKSwo4NB5Z97_0k_WbyBwyNajMtRUPmEuTr9VoO7CBwe1r3U3iIZbBKCfJjiG5FQToqzku31_YAs5OIIaV4B9ifLt5PwUA4mO-7XqgO1VQQjt2cUQo3Ui3EKWEJ-ov7F3wf_byGsguBwv2qMuAQiLBqs5jxrJUxyYIJAM7B_TtUjpQnNERvHEkt9TxCN8Kc6L-MejMOfu3VPdArf38naQjvBjBAZDznV639bkIRED7-soJbGMcGEyGWUqAVs9vkFleO9S4YLNvFShwo3ujBd7SMMdAyvi851CXT5uN5SDtaxmQnUGzAXmPJ9-UoJF23lSGB26eMdnIerzFoYMCgWPHyvt949IrsUKnpjuxebqQYVSrppmhIIrD8R255bJGSscVwdbrd9iA9-gHoB3UzCr5pd3gfW9Z6ynT4dQVILqtj0KgrDOHw4AIBqmwaecTBi5BeyXJx2oF1ClqS_7AanfqNToLcAwaKXnrK4RGyrX_mXHUFX9cT-o-eGqhi0lifCcJixwb3kG2AhP1USNNsCz31m40_c7cm7JcqLbzCnz4hvbivUvON5rf6kQ8PrfrjNrZA73VVIKhgZBDHxsHa3skwQvq-JH_3QulELy1-6vL5Kq84bg3ZPQxOUtxBRuyjxEJkpgG-sED2pYsKrUPqo0Ku_ggMTQjvoGGYRBt5uMlVX4pdB1zhOe1ZjcvPb8IwnL_BdLX4NvLpN97KH9Ot45bLeVTCGpv5UH8Nnm5CzQ53wqsOUD-9u5hqrSwx89sF7h8TlN9non95r7b_oHkU1R_czZ-ZjL6EubsUx4w-rWKwVU7GYde-ie62v8jcaLhkM72O4B0UvCfY2t3GtruZ4OirX44hWfOPujFr5L6bOkVSMKONJFooIJ2RIwCw64Mczkle2zQZ1P3u1DrMS5s65h-gNTwSGw3qyQBwF58-um9ycDis6f6O0ggqubsCDlsW7Vdnk_GlETHLDQ7lR_lRG1g3kRQEhKz2iwzxQan01X021EJd4TlocJYafpp8HU_rgcJdUmcvPFgB2xysE6F1vYdUAdovDztLftb5Bad4aKueUfDs8haq9TBgosHQinvKFfazE2StHUaEAVK_BiOYrH1XsrFQlXuMwhQlRgA9L3Q663gMrnhnfcQPSNd7P5EhqbadtddoVrLOKhMD5yBJj9RiC0vamCGVr2LA7hStIPBGysTBanE3u4bT-TKe2qCOskvfR2xU8NSlai9b8d57zkuxklf7LaDnMi-xu9TOqduYFfXOn87uqjaN3_emcq0NExYcQ1fMUMcbOuGoW6qeWlWmMtANjI3VaJCa_v2JYJ4cyl4gUoboC42d2esKg_Em2XfqUkKQh4XTG673LC1ebToWGPRvFtTQM3gZ4Wh5JY4pL58VeSsf1jhINWsytNpgGckHCK11BzUUx4MABT2BuMWf-a_5DV4KYdmXHn_AKAqoZWHgE2hC2Q6DUEaKTm7AV56Cm5vo-NibALDGH1zG8ih5C3dmHvQmES7vUOVM1jPS6k7paHXEwnPFE9M-zg6XmjKjdvSZ04lauZEeCjSJPb4E_v-uWlwkdHsDcTxfj9oTjfEpX0mZxIuT_Ex7Mx2I7DUHDUQgKgZT9n1TQym9patiPO8VYzYuoXrsEeLS1Mk5N3AmQXeB89x85_Xj2plBbDOqqMpAD2uMBXwHI4kut10unkHhl3S0JtA1tE0ukxTRaitpDQveHfao0tQC8gy4JEA6M5AD7iyWOm_iuW9baElC-R_g_6s_X1t2qv4mWwd8P-h7yFm4XEZg_oJEIA40hGwSPKD1d-b9QRz7Kl734V5RvMw1ekdsvZ9dVKNcPffkGX0inTp8RgkOWFUnS0hZpxuNbte3-rGWEt6Syy4x2jaH-Zr6o667kigSt1Q3cQO_eqQtq4VWuFmYIbDzkEbIKmIHY52gh-rB5k-FMQqCs-ay5Blj_IpvfcImMtrZBrbhL89gzGNRonBZEa-9kJeu4jr2_DLzw14KJR5zVNwiGLub3jJkgYqOZZ5ee_oNchx3v68S3wHyFnZA9IIaXRZjYLMrjD699h9SZvkTHdGAwICpyOjrfYbgX_7woRp1ZWBslOamnw6mDqJAk22nb1a8cpdGNP2IjXVRtuqIB8y36bHEFjChDTxERZ2dsz7a2mp5qM2Xz75OGBM77DAjnGpU7GFXDnolAnAsU5T3dd-LLnVlVhvzyuZWg7ZdH-0WsVVCezyIsQnm3WMpdPrlUcHtT6fyY2fhJVIm1QJEES5wEiEPMRrmGQ68V-q8TWlrPan6LU5Kr8Ak0nJKhE-r5bcaemeUbIsY4a9n2YDZck9CI6VGumMccelQ61Bhs5vgQ0W4AID90TXnUtJjWrVcgdhrLCWV_kv2_YSqDDoI6TM0oJKNaoNeG2HXCxXpHy8izUvfMwHvdniW3c4BPnvMpQW83bXrMPteKk-CFXdwQ6bB2PzzXAzWTp5q6D5cLWAyPJjju4AmopBUJmRwp0tjulMCClWqMiB08y8DIWDDLAAaG7Q-de-_Q-T6tZy4LRk_c0sYOtAaNCA1HgTDSLvP4j-xeuu8DrKv5SqefP2J7LLFM_JAi1gRh_84NUvUDvBdexr9wZI8eXjnnoDvP6KTosKCLmSC_ErmtzRXfUg1mz5fNVtlKSm03tqzmfL46iKDATVuEejDtlo34djj7uBV5DUw4lDIpQY1VsO1Ozgpoz9i8sNcRKQ-K3Of-vDL6R28gLBUq0Xo3nm1hAJgjc68C57jrMlJhD8GM6AeoGnnhDTfJ2xuxsdnH6i06qFUKcuTmA8l23Ek-A3ryx8DHAIaRX40d3e5MwaUqbglufHWBGId7KBia
iFuD3LhJC0CLl23XyHf225Rd4lir9LpltmuaRLnyS0FwIGZMaRmxQ-SWB2fDVzj81SJpo9lPDsuLu_ji7AA1cx-PnTj5fVp3APeRmy9E0A2v8hCKm4C6tPuvgC7Xp6MV8epxYIsGRiTy5wlHQE0FUuOdBtBH0rmGJDf4HQJoZHjhDhOJZqkvlDtEowB1mtndHgRz-0lpQurRm-RwKvl4n0quBfWZ1GL_PmiZIO36Iyyw4BRt3c1a5Zc5ilweQcle_-ZxawS1aAXXOaknt2c6AGB5JnmrTz2dXS7A8M20uNp7Cv8RoeiCYjPa1Co3Nr_6BuQL7HFxNsyk1AXDbG2qUJljSeWG3YFkaPHxgTw7aAefXrFFL_GNPi0YtageYJq3WN6lrdQ2CB0g7QLoj9dsHlAGhm8PtUESBUBbSyJVOm1lCuGGbB7psYxOLLO3BSqnXHb0--sDiyCTKMi-80rtMiHttXC3zAxXUFQjTre3a8KNohgPWx1PTAbxf96enJ33rhBV-2ewMIROT9j-K_Esee0eWUcTmt9v0yHW-V5ij0Hopx7oaXadNQLdgBJwUDf6R9xEktHhzUkyJ0g73gjrKQz2EidorhljD9LSFMAlUuRTkUhG35crMduH9TAAEgOHXZI24CD5Fz3n2KgXKoxWHlpaLlTwBXK1xLHVCrqCqvsBo60w5FV7cmdNTBjFbDU1EKSHLopt_aMgtT_6Fg1ZT6H2p0CAvvbinLkTLop3pSVU1_itnzRHOf3ayHzMrmSN_pI_03Of_63ZuHJmRWRCd7s1PviAo-B1LcG52VTanJz0JCF1RAlPj9-2DIgJLxDgNcPI96cTqZBbLk-rwKlebrmX6d5CBg3V5pmJKkgLIj5FpTmhiXhqDHHJvu-BxfzDQl2c8QtQYF6aygihfCCluN5biEv51XKRDpC-S3sU3USofDTgcg1pznwUvVv2eL8nWywckhIHWnip7z_ptCTmyn7BEzzgRgGLA_pLG17SPRJP6laoXHG_dprfpRM7gcLJZQ2zk29W2zVEpFwWePGpnQbpPjPqcOBiQfewxwnLHEuV8yGBR7Y-SEKrc6M6v8AHYk9oLXaRu1qBKkLUKSzKQhNFtfl-h-J8Adf0W9hxYSt6QNzf1YUuE8H_w2SrUGcVnsCnIQY_xu11sJ-0d-T2oFelzeEoasMeeCDamuFQye14ps0k4cM8vXpk_7ZrVE7rQmEpW40_n1iNHwB4UINg9CnQGXH98DzBBCoGPZpA1SELOwGTcJGcBZVQ5Tfey1SRFwXWJO0QFHfDb5-_tQUj9o30MhJBGxOftnwLaFROLgq3FuSBRM9dYsdlpHe1SILQXKVIwjXcOVMFgmbDq_hMSNFlMvblX9LLBduT9cXk6JhBVcxb8-oKbvbjL7zqQHOgke3ZC6oDEvcew2YzLMiNLiyGxJcthsyDfrWbhbq9DSRE7lYq9AVeh_Zc2wZq0RFh4CJGhXtW8WobIOY8JPIkyQKD4W_mKRxchykWyrCRliFId1Tzbgzu1NKxdZLiGZchs7MRgd-c_Kk0mDAvcVqyCSw5ZnlG8qWxmgwods9KD80tww2Bvp87a9Jwf-S8_PhqqG3ggGuLLm2CH71h7v6uA7f9-aCJKnlPiyb43OU2IK-rRgJf_U6VNAs1n0-RwWlaMttgA5wcecqRUlkneFkWpJOKDXpuAR9vwfoArMnPnp0jGQDN3-OPymX4xsYY6L4k0zC6j3zz9K2wgcGFD9kliVy2qwbeAqWL37Qdnr6sEbkxusF6IiYh-POUU_8rCQX03_uw0XHroHwK4mFajchjXmOY8ykOBQCIGPwCNI446xFhqWFDytDTXq9Eu651PlEqDELIcRwQz6KYWNJNlEFi4_f4GYS8sn0wpwte5R9QuaaLjc38obGBswmh15l9PrMvrWklBnnEZpV3NWmxQViKWcuey_QG_hRfQ-8Kjhv0f4D4L-d52x89yVXeVu0wbN_GstklEGCCecqvmQi1vXDf2FKr69Md-TE-mAh9pA-72vepP3guNcHz6PqzzOQX9Sj1uNZCkB0heHrXuCunn_Elv3ZvHZ-9AE26ybqtRVxaHtYrbtX9AKVk7ud_YdFPxSq-HeavXCXOBDGxEVleN03Q01jj7xoz5MjhKrVDF7XOobW0xMLtPfJLLmEGkBtSrLFCDGo1T7T3DnEiFQzXZutM50_l0k_3DxzDKhI4s5rOeeTMjSXDaxjM52LLgwAanVnMtKEsEXFVF4b5xvu_xn5CzqW5T0TTDOFXm2Gdxj-t59bgRGmnO56K85rTGgeJyXBroTz8cS4hkgfm2fQKiDAQZ5iMJeY4iqKZJTrOYb0IueB_ez-I8XW_dibgUd-WcJNKYKf4KnZR9_Z8o4OofbCdVj2mcgunpgjbTCORNWj7IpYmkHcbIQFtXnnts_2WNf-TtE6xr-iIVkwGABYE7ugHl1BUO5yKuDmeTOijSxWQGO22dzPnGVQ4O7AuXUYBFRa6FKVEIIVyk49ggvgRFFerncqEW1s8LR9gCzMIsxH2jCOyOSqjWGdZncRqDWhF6NYgFsqs3BDGYspC1vd9KFYppnH5W7MRYb2Duoi9yb7SQhNarto9KaqqgiTdEWeOw3kSkTZxa1moEh8F3ueFWhjQXNW4I3_inDPUdw0Xcf703y7uitnAsi-235tGC36JkWMR9M9Dx1cQSnS0NWhOYjUPPrKSHW8QCY-ZAfEUSJfixJeXEEUI0YmuGlFCIrLFvtlqFjxzqJW4JPCfnB0jCC9Z07d7rwHznYBSkr_cis4gNwnPOa11060WODyso6zRSJ7Q57bPhULvgnMZHZq2hl5dygeAz-elG8XYIUmr8jwXKuVGT_hl13cNI5QHaxshgdJuTzE362jxI4c0usFIVIzwhX6KqDFtWIZ5skj8iGioS6pDkY5tTj91aRu1ZL9eQ7KSLBbPeqhZCjQJGuudUr4u7HGuz8lQR0KvuZqKGGaybbPYwzJSx9qkGwqr_RNT7RW7oDxNiPlUHEf1qvED5M5FBFt_YlTmVtLQDJHRxvx3jv-Nc9pm6tew-et17Z0lMcXypXhr138RTXZYHSwJXsHMTNNGZFHCuZsyrq-PywrzCm-i6tXstJXx79s9os_dAaYgMtYEjPNRCb29LjaNw6OL60MKAl0Fung52DEDjnxFCTp9ygM_IkmLw95r9nhdq3smfsasefn6cp3YnEG3skKDswqS2Ul8Pilfqz3JI7mVucw4zA08ICIXAxB_L8_MPXUPPrVrdcf2HHicjjFs5L7mabPyv6blX2uB0BJ8Pcsdr_qdm-JbxmEEZZnxmtaG0VPgo23-DaHHIdMnNa-4cElpS64Tqcanin5QIsd1e1jIBJcjLmGOjV0eJpawOICK6dIhgdAsgLyXT-ItiUkVc_7NrPdpe0Fag7jMtvqXlvi-JljdILhGfbT7o-rNPY2iJ32jKUIDVZTSADQRf7Psnt40y3m1Ccx6aN3JVhNrgihrfjMF4rhZkqrh7Rlzs350VVOar8RblBoycjjBh9-xyXXSp4OWebr4rK6w76H
QqKoOdQZvFrBG0Y3Qfkq1tNnJyy7QA3ZZwhnVPzmvi7GeCLIZMNQLQ2A3mUvZXcmmcI2NmLBJuTHoQ5IBhmtMA9_b1qVTt-8iy0jIklazgzzUa0Zdl3IAuptdmJT7AGneTDhrR60WBnxVbbjJa-_LvOyVdEVimw6wNuUO0HIuyLo7s5MkR1D1SNShzV7PUtM2YKxUxbE1zEHkqiTIF1P5RxIhh85XAaIaJMlIxjhvtIUy--jiuzLh9HDDjDCuSMRrqOk958lSAkZnProYbHuRI12ViZ561Z4-whNCQwctuoP68FvRWLByoO2NtNSaPBC9aqNx6OWHcTTGdaip8MZLmD_xPjoq6O04HNxBsaCQeo2xqMkeoB74m_8HtZQIPHyEgW2cAnDDOPDRFspt8KN9TMgAWTf_Pa7eI1ZvWo1vZtjOUi9E9SARkpmtFNtaQP_NRLp_76h9B_piPJCdzuIl9QXbwscJOaDHIlYfeauN1j1zMGmSY1jS2UPNPF7Qfy1wUcdxLFuzGy_1YPe6i8DoMimj_c995kmHFKi9jIdBHrTz5p-pX_E01O95Wd1mzgCeQCo643zzQ10c93MASc5dgHgCjyTfT4RATHXhVrhhjnamu0xnLxIHt0qA43qDfQd23xzzp5gA0KLoQ-b9fYpo5tjD3z-A6BuVES9k9W60WN3nwxJiil6rjHSHxzq_rmoDj--EOBsKv5TcismcMk4IdBgoKWcsHGW43c5t5gmaA6c1QZPDHZWTnkHPZIsH2U1kMcsNHoWG-H-xQ65cz6cceu_ATRu26etMuEZo4ecqNSENhCQq7NlkEnWVacuW7qybowkEr2uIU-BB_wI1oHPKVupH-0ZOHsVZOgktQ5g1DWiXUVloBabeRIZJt7fDYFs5oNgXxggElnN9fK-fb8BQb9j2BraENpRQonC68YsbFQLoyefvK3WnO1GFQQg7qDqzhU9PgMU6CIYfMfuHAoFiXtaTsnykAIv7m0nckJn8nldATLqakn72ObT_rzRQXi_cKoksBvKel4sqg7FtoM9no5s9a3wT1OwRXNUZ5Jg5iYyFW9mlRV4-Pwo67XhiipGG-iXsqxlhmDQjmeJoBfOKfm3MWJccFO9hMReoCp1DDqP5wxG_1gFMhl4mHPgxQW24pRrYOO00YYdR9VVrsBdjalyjo4mK5PuWqP0O3BKTZ7-Al2P5_VyQ2MxMZAZCkHSE5tRIkq0k29sZLPM58yUwN5FkIrzop2PR_VNYNa2eY2jK-mVv7eYvwcq9LcF6JbJN79K9YyPI-dqKutPoFzFXQEijdF77VbVDQYN5v33gKMYWIyXUb_ZgBFZ9wwZkGkzK7aRR22QVhUMk-M6dZrVH365Cmnboiq_7ZqSIa49uF1qlWbCljkpXMDxF8i0YGRdx4CUSU6vfyKyMUtCb-c6ZxGztojxz72-u3SwPOEJeRNUjpgH25LHo21ORRGuDHM0p04CyxXYe6YH-qYyINgouQ58GorDnhZJfLssqXFDEV_HeQfuZp-KsEnHSMDgX7ibItCu_ETXE2ano2M0XnOjdmSPRHl1aFyQAWkHsgTsrlzucRFcDhkK1BNIGPgC4eWce4bsaf_DHP0OJW8qVEnd15Oj1r9Om2K-vL5pYCLySkxA85DSgMKNOXPsPV3wGkiJjLJqn250v5aiwAziMHrcY5ik4Fm2AvDlRXPvGqXOuQG-zJsFc05J-1TBLgT1wZ1b2mw_qihmlJt71mthNKfgjmCMtx6WVKgRGM2lhdZ6gXt_9AkBcf3Rax9inuLnPgfaOZSCNa-MMR5yVa7ql7i-NwvuupwuuTuuKGkXv_-T3EK-Ky418dDDOMTgpW8nHiUM6Y5uBu6v__N8NMYvnJmujw6dUTNMR-R6vgaXdDtzs6a4KAccwIgqQ43uhgDexj9x4OB4304dKb5PJ2HpgIlnXlhjB-JGmnQAbAIaLrEcW9V0S0PX4H_Mz4NGqaAtDTeeiw=', + signature=IsStr(), provider_name='openai', ), - ThinkingPart( - content=IsStr(), - id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27', - ), - ThinkingPart( - content=IsStr(), - id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27', - ), + ThinkingPart(content=IsStr(), id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27'), + ThinkingPart(content=IsStr(), id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27'), + ThinkingPart(content=IsStr(), id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27'), + ThinkingPart(content=IsStr(), id='rs_68c1ffe148588191812b659c6dc35ce60003919771fccd27'), TextPart(content=IsStr(), id='msg_68c200091ccc8191b38e07ea231e862d0003919771fccd27'), ], usage=RequestUsage(input_tokens=23, output_tokens=2030, details={'reasoning_tokens': 1728}), model_name='gpt-5-2025-08-07', timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'completed'}, + provider_details={ + 'finish_reason': 'completed', + 'timestamp': datetime.datetime(2025, 9, 10, 22, 46, 57, tzinfo=datetime.timezone.utc), + }, provider_response_id='resp_68c1ffe0f9a48191894c46b63c1a4f440003919771fccd27', finish_reason='stop', run_id=IsStr(), @@ -1256,6 +1267,7 @@ async def test_bedrock_model_thinking_part_from_other_model( timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ 
-1310,16 +1322,17 @@ async def test_bedrock_group_consecutive_tool_return_parts(bedrock_provider: Bed
     now = datetime.datetime.now()
     # Create a ModelRequest with 3 consecutive ToolReturnParts
     req = [
-        ModelRequest(parts=[UserPromptPart(content=['Hello'])]),
+        ModelRequest(parts=[UserPromptPart(content=['Hello'])], timestamp=IsDatetime()),
         ModelResponse(parts=[TextPart(content='Hi')]),
-        ModelRequest(parts=[UserPromptPart(content=['How are you?'])]),
+        ModelRequest(parts=[UserPromptPart(content=['How are you?'])], timestamp=IsDatetime()),
         ModelResponse(parts=[TextPart(content='Cloudy')]),
         ModelRequest(
             parts=[
                 ToolReturnPart(tool_name='tool1', content='result1', tool_call_id='id1', timestamp=now),
                 ToolReturnPart(tool_name='tool2', content='result2', tool_call_id='id2', timestamp=now),
                 ToolReturnPart(tool_name='tool3', content='result3', tool_call_id='id3', timestamp=now),
-            ]
+            ],
+            timestamp=IsDatetime(),
         ),
     ]
@@ -1409,6 +1422,7 @@ async def test_bedrock_model_thinking_part_stream(allow_model_requests: None, be
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1438,7 +1452,8 @@ async def test_bedrock_mistral_tool_result_format(bedrock_provider: BedrockProvi
         ModelRequest(
             parts=[
                 ToolReturnPart(tool_name='tool1', content={'foo': 'bar'}, tool_call_id='id1', timestamp=now),
-            ]
+            ],
+            timestamp=IsDatetime(),
         ),
     ]
diff --git a/tests/models/test_cohere.py b/tests/models/test_cohere.py
index 07cb6ae9b9..886d09b73d 100644
--- a/tests/models/test_cohere.py
+++ b/tests/models/test_cohere.py
@@ -3,7 +3,7 @@
 import json
 from collections.abc import Sequence
 from dataclasses import dataclass
-from datetime import timezone
+from datetime import datetime, timezone
 from typing import Any, cast

 import pytest
@@ -118,6 +118,7 @@ async def test_request_simple_success(allow_model_requests: None):
         [
             ModelRequest(
                 parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -131,6 +132,7 @@ async def test_request_simple_success(allow_model_requests: None):
             ),
             ModelRequest(
                 parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -200,6 +202,7 @@ async def test_request_structured_response(allow_model_requests: None):
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -226,6 +229,7 @@ async def test_request_structured_response(allow_model_requests: None):
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -292,6 +296,7 @@ async def get_location(loc_name: str) -> str:
                     SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                     UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -318,6 +323,7 @@ async def get_location(loc_name: str) -> str:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -345,6 +351,7 @@ async def get_location(loc_name: str) -> str:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -436,6 +443,7 @@ def simple_instructions(ctx: RunContext):
         [
             ModelRequest(
                 parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())],
+                timestamp=IsNow(tz=timezone.utc),
                 instructions='You are a helpful assistant.',
                 run_id=IsStr(),
             ),
@@ -484,6 +492,7 @@ async def test_cohere_model_thinking_part(allow_model_requests: None, co_api_key
         [
             ModelRequest(
                 parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -497,7 +506,10 @@ async def test_cohere_model_thinking_part(allow_model_requests: None, co_api_key
                 model_name='o3-mini-2025-01-31',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'completed'},
+                provider_details={
+                    'finish_reason': 'completed',
+                    'timestamp': datetime(2025, 9, 5, 22, 7, 17, tzinfo=timezone.utc),
+                },
                 provider_response_id='resp_68bb5f153efc81a2b3958ddb1f257ff30886f4f20524f3b9',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -519,6 +531,7 @@ async def test_cohere_model_thinking_part(allow_model_requests: None, co_api_key
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
diff --git a/tests/models/test_deepseek.py b/tests/models/test_deepseek.py
index a8c0d8ceaa..4ec81579a9 100644
--- a/tests/models/test_deepseek.py
+++ b/tests/models/test_deepseek.py
@@ -1,5 +1,7 @@
 from __future__ import annotations as _annotations

+import datetime
+
 import pytest
 from inline_snapshot import snapshot

@@ -36,6 +38,7 @@ async def test_deepseek_model_thinking_part(allow_model_requests: None, deepseek
         [
             ModelRequest(
                 parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -55,7 +58,10 @@ async def test_deepseek_model_thinking_part(allow_model_requests: None, deepseek
                 model_name='deepseek-reasoner',
                 timestamp=IsDatetime(),
                 provider_name='deepseek',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime.datetime(2025, 4, 22, 14, 9, 11, tzinfo=datetime.timezone.utc),
+                },
                 provider_response_id='181d9669-2b3a-445e-bd13-2ebff2c378f6',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -83,6 +89,7 @@ async def test_deepseek_model_thinking_stream(allow_model_requests: None, deepse
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -102,7 +109,10 @@ async def test_deepseek_model_thinking_stream(allow_model_requests: None, deepse
                 model_name='deepseek-reasoner',
                 timestamp=IsDatetime(),
                 provider_name='deepseek',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime.datetime(2025, 7, 10, 17, 41, 44, tzinfo=datetime.timezone.utc),
+                },
                 provider_response_id='33be18fc-3842-486c-8c29-dd8e578f7f20',
                 finish_reason='stop',
                 run_id=IsStr(),
diff --git a/tests/models/test_fallback.py b/tests/models/test_fallback.py
index 2d58fae71a..d974d1f85e 100644
--- a/tests/models/test_fallback.py
+++ b/tests/models/test_fallback.py
@@ -77,6 +77,7 @@ def test_first_successful() -> None:
                 parts=[
                     UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc)),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -104,6 +105,7 @@ def test_first_failed() -> None:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -132,6 +134,7 @@ def test_first_failed_instrumented(capfire: CaptureLogfire) -> None:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -815,6 +818,7 @@ def prompted_output_func(_: list[ModelMessage], info: AgentInfo) -> ModelRespons
                     timestamp=IsNow(tz=timezone.utc),
                 )
             ],
+            timestamp=IsDatetime(),
             instructions='Be kind',
             run_id=IsStr(),
         ),
diff --git a/tests/models/test_gemini.py b/tests/models/test_gemini.py
index 0f2e51c0ce..b5a9f11313 100644
--- a/tests/models/test_gemini.py
+++ b/tests/models/test_gemini.py
@@ -561,6 +561,7 @@ async def test_text_success(get_gemini_client: GetGeminiClient):
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -581,6 +582,7 @@ async def test_text_success(get_gemini_client: GetGeminiClient):
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -593,6 +595,7 @@ async def test_text_success(get_gemini_client: GetGeminiClient):
             ),
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -621,6 +624,7 @@ async def test_request_structured_response(get_gemini_client: GetGeminiClient):
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -640,6 +644,7 @@ async def test_request_structured_response(get_gemini_client: GetGeminiClient):
                         tool_call_id=IsStr(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -685,6 +690,7 @@ async def get_location(loc_name: str) -> str:
                     SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                     UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -706,6 +712,7 @@ async def get_location(loc_name: str) -> str:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -734,6 +741,7 @@ async def get_location(loc_name: str) -> str:
                         tool_call_id=IsStr(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -892,6 +900,7 @@ async def bar(y: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -914,6 +923,7 @@ async def bar(y: str) -> str:
                     ToolReturnPart(
                         tool_name='bar', content='b', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -933,6 +943,7 @@ async def bar(y: str) -> str:
                         tool_call_id=IsStr(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -979,6 +990,7 @@ def get_location(loc_name: str) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1005,6 +1017,7 @@ def get_location(loc_name: str) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -1191,6 +1204,7 @@ async def get_image() -> BinaryContent:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1223,6 +1237,7 @@ async def get_image() -> BinaryContent:
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1379,6 +1394,7 @@ async def test_gemini_model_instructions(allow_model_requests: None, gemini_api_
         [
             ModelRequest(
                 parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 instructions='You are a helpful assistant.',
                 run_id=IsStr(),
             ),
@@ -1457,6 +1473,7 @@ async def test_gemini_model_thinking_part(allow_model_requests: None, gemini_api
         [
             ModelRequest(
                 parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1497,7 +1514,10 @@ async def test_gemini_model_thinking_part(allow_model_requests: None, gemini_api
                 model_name='o3-mini-2025-01-31',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'completed'},
+                provider_details={
+                    'finish_reason': 'completed',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='resp_680393ff82488191a7d0850bf0dd99a004f0817ea037a07b',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1517,6 +1537,7 @@ async def test_gemini_model_thinking_part(allow_model_requests: None, gemini_api
         [
             ModelRequest(
                 parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1530,7 +1551,10 @@ async def test_gemini_model_thinking_part(allow_model_requests: None, gemini_api
                 model_name='o3-mini-2025-01-31',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'completed'},
+                provider_details={
+                    'finish_reason': 'completed',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='resp_680393ff82488191a7d0850bf0dd99a004f0817ea037a07b',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1542,6 +1566,7 @@ async def test_gemini_model_thinking_part(allow_model_requests: None, gemini_api
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1614,6 +1639,7 @@ async def test_gemini_youtube_video_url_input(allow_model_requests: None, gemini
                 parts=[
                     UserPromptPart(content=['What is the main content of this URL?', url], timestamp=IsDatetime()),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1698,6 +1724,7 @@ async def bar() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1720,6 +1747,7 @@ async def bar() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1748,6 +1776,7 @@ async def bar() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -1780,6 +1809,7 @@ async def get_user_country() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1802,6 +1832,7 @@ async def get_user_country() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1830,6 +1861,7 @@ async def get_user_country() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -1861,6 +1893,7 @@ def upcase(text: str) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1933,6 +1966,7 @@ class CityLocation(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1985,6 +2019,7 @@ class CountryLanguage(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2038,6 +2073,7 @@ class CityLocation(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2087,6 +2123,7 @@ async def get_user_country() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2109,6 +2146,7 @@ async def get_user_country() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2152,6 +2190,7 @@ class CountryLanguage(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
diff --git a/tests/models/test_gemini_vertex.py b/tests/models/test_gemini_vertex.py
index 20e2e5e0dc..23598389c4 100644
--- a/tests/models/test_gemini_vertex.py
+++ b/tests/models/test_gemini_vertex.py
@@ -141,6 +141,7 @@ async def test_url_input(
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -180,6 +181,7 @@ async def test_url_input_force_download(allow_model_requests: None) -> None:  #
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
diff --git a/tests/models/test_google.py b/tests/models/test_google.py
index 3ef8cd5dda..99c324962d 100644
--- a/tests/models/test_google.py
+++ b/tests/models/test_google.py
@@ -5,6 +5,7 @@
 import os
 import re
 from collections.abc import AsyncIterator
+from datetime import timezone
 from typing import Any

 import pytest
@@ -61,7 +62,7 @@
 from pydantic_ai.settings import ModelSettings
 from pydantic_ai.usage import RequestUsage, RunUsage, UsageLimits

-from ..conftest import IsBytes, IsDatetime, IsInstance, IsStr, try_import
+from ..conftest import IsBytes, IsDatetime, IsInstance, IsNow, IsStr, try_import
 from ..parts_from_messages import part_types_from_messages

 with try_import() as imports_successful:
@@ -134,6 +135,7 @@ async def test_google_model(allow_model_requests: None, google_provider: GoogleP
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -199,6 +201,7 @@ async def temperature(city: str, date: datetime.date) -> str:
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -226,6 +229,7 @@ async def temperature(city: str, date: datetime.date) -> str:
                     ToolReturnPart(
                         tool_name='temperature', content='30°C', tool_call_id=IsStr(), timestamp=IsDatetime()
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -258,6 +262,7 @@ async def temperature(city: str, date: datetime.date) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -324,6 +329,7 @@ async def test_google_model_builtin_code_execution_stream(
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -564,6 +570,7 @@ async def get_capital(country: str) -> str:
                     SystemPromptPart(content='You are a helpful chatbot.', timestamp=IsDatetime()),
                     UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime()),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -595,6 +602,7 @@ async def get_capital(country: str) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -626,6 +634,7 @@ async def get_capital(country: str) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -931,6 +940,7 @@ def instructions() -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())],
+                timestamp=IsNow(tz=timezone.utc),
                 instructions='You are a helpful assistant.',
                 run_id=IsStr(),
             ),
@@ -960,9 +970,14 @@ async def test_google_model_multiple_documents_in_history(
     result = await agent.run(
         'What is in the documents?',
         message_history=[
-            ModelRequest(parts=[UserPromptPart(content=['Here is a PDF document: ', document_content])]),
+            ModelRequest(
+                parts=[UserPromptPart(content=['Here is a PDF document: ', document_content])], timestamp=IsDatetime()
+            ),
             ModelResponse(parts=[TextPart(content='foo bar')]),
-            ModelRequest(parts=[UserPromptPart(content=['Here is another PDF document: ', document_content])]),
+            ModelRequest(
+                parts=[UserPromptPart(content=['Here is another PDF document: ', document_content])],
+                timestamp=IsDatetime(),
+            ),
             ModelResponse(parts=[TextPart(content='foo bar 2')]),
         ],
     )
@@ -1004,6 +1019,7 @@ async def test_google_model_web_search_tool(allow_model_requests: None, google_p
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1083,6 +1099,7 @@ async def test_google_model_web_search_tool(allow_model_requests: None, google_p
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1177,6 +1194,7 @@ async def test_google_model_web_search_tool_stream(allow_model_requests: None, g
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1322,6 +1340,7 @@ async def test_google_model_web_search_tool_stream(allow_model_requests: None, g
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1427,6 +1446,7 @@ async def test_google_model_web_fetch_tool(
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1506,6 +1526,7 @@ async def test_google_model_web_fetch_tool_stream(allow_model_requests: None, go
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1631,6 +1652,7 @@ async def test_google_model_code_execution_tool(allow_model_requests: None, goog
                     SystemPromptPart(content='You are a helpful chatbot.', timestamp=IsDatetime()),
                     UserPromptPart(content='What day is today in Utrecht?', timestamp=IsDatetime()),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1699,6 +1721,7 @@ async def test_google_model_code_execution_tool(allow_model_requests: None, goog
         [
             ModelRequest(
                 parts=[UserPromptPart(content='What day is tomorrow?', timestamp=IsDatetime())],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1888,7 +1911,7 @@ async def test_google_model_empty_assistant_response(allow_model_requests: None,
     result = await agent.run(
         'Was your previous response empty?',
         message_history=[
-            ModelRequest(parts=[UserPromptPart(content='Hi')]),
+            ModelRequest(parts=[UserPromptPart(content='Hi')], timestamp=IsDatetime()),
             ModelResponse(parts=[TextPart(content='')]),
         ],
     )
@@ -1927,6 +1950,7 @@ def dummy() -> None: ...  # pragma: no cover
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1964,6 +1988,7 @@ def dummy() -> None: ...  # pragma: no cover
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2015,6 +2040,7 @@ def dummy() -> None: ...  # pragma: no cover
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2025,29 +2051,20 @@ def dummy() -> None: ...  # pragma: no cover
                     signature=IsStr(),
                     provider_name='openai',
                 ),
-                ThinkingPart(
-                    content=IsStr(),
-                    id='rs_68c1fb6c15c48196b964881266a03c8e0c14a8a9087e8689',
-                ),
-                ThinkingPart(
-                    content=IsStr(),
-                    id='rs_68c1fb6c15c48196b964881266a03c8e0c14a8a9087e8689',
-                ),
-                ThinkingPart(
-                    content=IsStr(),
-                    id='rs_68c1fb6c15c48196b964881266a03c8e0c14a8a9087e8689',
-                ),
-                ThinkingPart(
-                    content=IsStr(),
-                    id='rs_68c1fb6c15c48196b964881266a03c8e0c14a8a9087e8689',
-                ),
+                ThinkingPart(content=IsStr(), id='rs_68c1fb6c15c48196b964881266a03c8e0c14a8a9087e8689'),
+                ThinkingPart(content=IsStr(), id='rs_68c1fb6c15c48196b964881266a03c8e0c14a8a9087e8689'),
+                ThinkingPart(content=IsStr(), id='rs_68c1fb6c15c48196b964881266a03c8e0c14a8a9087e8689'),
+                ThinkingPart(content=IsStr(), id='rs_68c1fb6c15c48196b964881266a03c8e0c14a8a9087e8689'),
                 TextPart(content=IsStr(), id='msg_68c1fb814fdc8196aec1a46164ddf7680c14a8a9087e8689'),
             ],
             usage=RequestUsage(input_tokens=45, output_tokens=1719, details={'reasoning_tokens': 1408}),
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime.datetime(2025, 9, 10, 22, 27, 55, tzinfo=datetime.timezone.utc),
+            },
             provider_response_id='resp_68c1fb6b6a248196a6216e80fc2ace380c14a8a9087e8689',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2073,6 +2090,7 @@ def dummy() -> None: ...  # pragma: no cover
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2129,6 +2147,7 @@ def dummy() -> None: ...  # pragma: no cover
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2325,9 +2344,10 @@ async def test_google_url_input(
                 parts=[
                     UserPromptPart(
                         content=['What is the main content of this URL?', Is(url)],
-                        timestamp=IsDatetime(),
+                        timestamp=IsNow(tz=timezone.utc),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2336,7 +2356,7 @@ async def test_google_url_input(
                 model_name='gemini-2.0-flash',
                 timestamp=IsDatetime(),
                 provider_name='google-vertex',
-                provider_details={'finish_reason': 'STOP'},
+                provider_details={'finish_reason': 'STOP', 'timestamp': IsDatetime()},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -2367,9 +2387,10 @@ async def test_google_url_input_force_download(
                 parts=[
                     UserPromptPart(
                         content=['What is the main content of this URL?', Is(video_url)],
-                        timestamp=IsDatetime(),
+                        timestamp=IsNow(tz=timezone.utc),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2377,9 +2398,9 @@ async def test_google_url_input_force_download(
                 usage=IsInstance(RequestUsage),
                 model_name='gemini-2.0-flash',
                 timestamp=IsDatetime(),
-                provider_details={'finish_reason': 'STOP'},
-                provider_response_id=IsStr(),
                 provider_name='google-vertex',
+                provider_details={'finish_reason': 'STOP', 'timestamp': IsDatetime()},
+                provider_response_id=IsStr(),
                 finish_reason='stop',
                 run_id=IsStr(),
             ),
@@ -2420,6 +2441,7 @@ async def bar() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2444,6 +2466,7 @@ async def bar() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2474,6 +2497,7 @@ async def bar() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -2516,6 +2540,7 @@ async def get_user_country() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2540,6 +2565,7 @@ async def get_user_country() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2570,6 +2596,7 @@ async def get_user_country() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -2602,6 +2629,7 @@ async def get_user_country() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2626,6 +2654,7 @@ async def get_user_country() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2690,6 +2719,7 @@ class CityLocation(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2743,6 +2773,7 @@ class CountryLanguage(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2797,6 +2828,7 @@ class CityLocation(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2843,6 +2875,7 @@ async def get_user_country() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2867,6 +2900,7 @@ async def get_user_country() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2911,6 +2945,7 @@ class CountryLanguage(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -3116,6 +3151,7 @@ async def test_google_image_generation(allow_model_requests: None, google_provid
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -3156,6 +3192,7 @@ async def test_google_image_generation(allow_model_requests: None, google_provid
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -3231,6 +3268,7 @@ async def test_google_image_generation_stream(allow_model_requests: None, google
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -3303,6 +3341,7 @@ async def test_google_image_generation_with_text(allow_model_requests: None, goo
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -3419,6 +3458,7 @@ class Animal(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -3453,6 +3493,7 @@ class Animal(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -3527,6 +3568,7 @@ async def test_google_image_generation_with_web_search(allow_model_requests: Non
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -4092,6 +4134,7 @@ def get_country() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -4113,7 +4156,10 @@ def get_country() -> str:
                 model_name='gpt-5-2025-08-07',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'completed'},
+                provider_details={
+                    'finish_reason': 'completed',
+                    'timestamp': datetime.datetime(2025, 11, 21, 21, 57, 19, tzinfo=datetime.timezone.utc),
+                },
                 provider_response_id=IsStr(),
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -4127,6 +4173,7 @@ def get_country() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -4146,7 +4193,10 @@ def get_country() -> str:
                 model_name='gpt-5-2025-08-07',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'completed'},
+                provider_details={
+                    'finish_reason': 'completed',
+                    'timestamp': datetime.datetime(2025, 11, 21, 21, 57, 25, tzinfo=datetime.timezone.utc),
+                },
                 provider_response_id=IsStr(),
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -4189,6 +4239,7 @@ def get_country() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -4255,7 +4306,7 @@ async def test_google_api_non_http_error(
 async def test_google_model_retrying_after_empty_response(allow_model_requests: None, google_provider: GoogleProvider):
     message_history = [
-        ModelRequest(parts=[UserPromptPart(content='Hi')]),
+        ModelRequest(parts=[UserPromptPart(content='Hi')], timestamp=IsDatetime()),
         ModelResponse(parts=[]),
     ]
@@ -4267,7 +4318,11 @@ async def test_google_model_retrying_after_empty_response(allow_model_requests:
     assert result.output == snapshot('Hello! How can I help you today?')
     assert result.new_messages() == snapshot(
         [
-            ModelRequest(parts=[], run_id=IsStr()),
+            ModelRequest(
+                parts=[],
+                timestamp=IsNow(tz=timezone.utc),
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[
                     TextPart(
diff --git a/tests/models/test_groq.py b/tests/models/test_groq.py
index dd3395750e..1e37b4cb20 100644
--- a/tests/models/test_groq.py
+++ b/tests/models/test_groq.py
@@ -48,7 +48,7 @@
 from pydantic_ai.output import NativeOutput, PromptedOutput
 from pydantic_ai.usage import RequestUsage, RunUsage

-from ..conftest import IsDatetime, IsInstance, IsNow, IsStr, raise_if_exception, try_import
+from ..conftest import IsDatetime, IsInstance, IsStr, raise_if_exception, try_import
 from .mock_async_stream import MockAsyncStream

 with try_import() as imports_successful:
@@ -167,29 +167,37 @@ async def test_request_simple_success(allow_model_requests: None):
     assert result.all_messages() == snapshot(
         [
             ModelRequest(
-                parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                parts=[UserPromptPart(content='hello', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='world')],
                 model_name='llama-3.3-70b-versatile-123',
-                timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                },
                 provider_response_id='123',
                 finish_reason='stop',
                 run_id=IsStr(),
             ),
             ModelRequest(
-                parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                parts=[UserPromptPart(content='hello', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='world')],
                 model_name='llama-3.3-70b-versatile-123',
-                timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                },
                 provider_response_id='123',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -234,7 +242,8 @@ async def test_request_structured_response(allow_model_requests: None):
     assert result.all_messages() == snapshot(
         [
             ModelRequest(
-                parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                parts=[UserPromptPart(content='Hello', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -246,9 +255,12 @@ async def test_request_structured_response(allow_model_requests: None):
                     )
                 ],
                 model_name='llama-3.3-70b-versatile-123',
-                timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc),
+                timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                },
                 provider_response_id='123',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -259,9 +271,10 @@ async def test_request_structured_response(allow_model_requests: None):
                         tool_name='final_result',
                         content='Final result processed.',
                         tool_call_id='123',
-                        timestamp=IsNow(tz=timezone.utc),
+                        timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -325,9 +338,10 @@ async def get_location(loc_name: str) -> str:
         [
             ModelRequest(
                 parts=[
-                    SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
-                    UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
+                    SystemPromptPart(content='this is the system prompt', timestamp=IsDatetime()),
+                    UserPromptPart(content='Hello', timestamp=IsDatetime()),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -340,9 +354,12 @@ async def get_location(loc_name: str) -> str:
                 ],
                 usage=RequestUsage(input_tokens=2, output_tokens=1),
                 model_name='llama-3.3-70b-versatile-123',
-                timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                },
                 provider_response_id='123',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -353,9 +370,10 @@ async def get_location(loc_name: str) -> str:
                         tool_name='get_location',
                         content='Wrong location, please try again',
                         tool_call_id='1',
-                        timestamp=IsNow(tz=timezone.utc),
+                        timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -368,9 +386,12 @@ async def get_location(loc_name: str) -> str:
                 ],
                 usage=RequestUsage(input_tokens=3, output_tokens=2),
                 model_name='llama-3.3-70b-versatile-123',
-                timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                },
                 provider_response_id='123',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -381,17 +402,21 @@ async def get_location(loc_name: str) -> str:
                         tool_name='get_location',
                         content='{"lat": 51, "lng": 0}',
                         tool_call_id='2',
-                        timestamp=IsNow(tz=timezone.utc),
+                        timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='final response')],
                 model_name='llama-3.3-70b-versatile-123',
-                timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                },
                 provider_response_id='123',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -497,7 +522,8 @@ async def test_stream_structured(allow_model_requests: None):
     assert result.all_messages() == snapshot(
         [
             ModelRequest(
-                parts=[UserPromptPart(content='', timestamp=IsNow(tz=timezone.utc))],
+                parts=[UserPromptPart(content='', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -509,8 +535,9 @@ async def test_stream_structured(allow_model_requests: None):
                     )
                 ],
                 model_name='llama-3.3-70b-versatile',
-                timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                timestamp=IsDatetime(),
                 provider_name='groq',
+                provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
                 provider_response_id='x',
                 run_id=IsStr(),
             ),
@@ -520,9 +547,10 @@ async def test_stream_structured(allow_model_requests: None):
                         tool_name='final_result',
                         content='Final result processed.',
                         tool_call_id=IsStr(),
-                        timestamp=IsNow(tz=timezone.utc),
+                        timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -607,6 +635,7 @@ async def get_image() -> BinaryContent:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -615,7 +644,10 @@ async def get_image() -> BinaryContent:
                 model_name='meta-llama/llama-4-scout-17b-16e-instruct',
                 timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': datetime(2025, 4, 29, 20, 21, 45, tzinfo=timezone.utc),
+                },
                 provider_response_id='chatcmpl-3c327c89-e9f5-4aac-a5d5-190e6f6f25c9',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -636,6 +668,7 @@ async def get_image() -> BinaryContent:
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -644,7 +677,10 @@ async def get_image() -> BinaryContent:
                 model_name='meta-llama/llama-4-scout-17b-16e-instruct',
                 timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2025, 4, 29, 20, 21, 47, tzinfo=timezone.utc),
+                },
                 provider_response_id='chatcmpl-82dfad42-6a28-4089-82c3-c8633f626c0d',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -733,6 +769,7 @@ async def test_groq_model_instructions(allow_model_requests: None, groq_api_key:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 instructions='You are a helpful assistant.',
                 run_id=IsStr(),
             ),
@@ -742,7 +779,10 @@ async def test_groq_model_instructions(allow_model_requests: None, groq_api_key:
                 model_name='llama-3.3-70b-versatile',
                 timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2025, 4, 7, 16, 32, 53, tzinfo=timezone.utc),
+                },
                 provider_response_id='chatcmpl-7586b6a9-fb4b-4ec7-86a0-59f0a77844cf',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -772,6 +812,7 @@ async def test_groq_model_web_search_tool(allow_model_requests: None, groq_api_k
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1010,7 +1051,10 @@ async def test_groq_model_web_search_tool(allow_model_requests: None, groq_api_k
                 model_name='groq/compound',
                 timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2025, 9, 17, 21, 14, 13, tzinfo=timezone.utc),
+                },
                 provider_response_id='stub',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1042,6 +1086,7 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1177,7 +1222,10 @@ async def test_groq_model_web_search_tool_stream(allow_model_requests: None, gro
                 model_name='groq/compound',
                 timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2025, 9, 17, 21, 20, 46, tzinfo=timezone.utc),
+                },
                 provider_response_id='stub',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1933,6 +1981,7 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key
         [
             ModelRequest(
                 parts=[UserPromptPart(content='I want a recipe to cook Uruguayan alfajores.', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 instructions='You are a chef.',
                 run_id=IsStr(),
             ),
@@ -1942,7 +1991,10 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key
                 model_name='deepseek-r1-distill-llama-70b',
                 timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2025, 4, 19, 12, 3, 5, tzinfo=timezone.utc),
+                },
                 provider_response_id=IsStr(),
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1959,6 +2011,7 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key
         [
             ModelRequest(
                 parts=[UserPromptPart(content='I want a recipe to cook Uruguayan alfajores.', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 instructions='You are a chef.',
                 run_id=IsStr(),
             ),
@@ -1968,7 +2021,10 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key
                 model_name='deepseek-r1-distill-llama-70b',
                 timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2025, 4, 19, 12, 3, 5, tzinfo=timezone.utc),
+                },
                 provider_response_id='chatcmpl-9748c1af-1065-410a-969a-d7fb48039fbb',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1980,6 +2036,7 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 instructions='You are a chef.',
                 run_id=IsStr(),
             ),
@@ -1989,7 +2046,10 @@ async def test_groq_model_thinking_part(allow_model_requests: None, groq_api_key
                 model_name='deepseek-r1-distill-llama-70b',
                 timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2025, 4, 19, 12, 3, 10, tzinfo=timezone.utc),
+                },
                 provider_response_id='chatcmpl-994aa228-883a-498c-8b20-9655d770b697',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -2023,6 +2083,7 @@ async def test_groq_model_thinking_part_iter(allow_model_requests: None, groq_ap
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 instructions='You are a chef.',
                 run_id=IsStr(),
             ),
@@ -2109,7 +2170,10 @@ async def test_groq_model_thinking_part_iter(allow_model_requests: None, groq_ap
                 model_name='deepseek-r1-distill-llama-70b',
                 timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2025, 9, 17, 21, 29, 56, tzinfo=timezone.utc),
+                },
                 provider_response_id='chatcmpl-4ef92b12-fb9d-486f-8b98-af9b5ecac736',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -3366,6 +3430,7 @@ async def test_groq_model_thinking_part_iter(allow_model_requests: None, groq_ap
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 instructions='You are a chef.',
                 run_id=IsStr(),
             ),
@@ -3471,7 +3536,10 @@ async def test_groq_model_thinking_part_iter(allow_model_requests: None, groq_ap
                 model_name='deepseek-r1-distill-llama-70b',
                 timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2025, 9, 17, 21, 30, 1, tzinfo=timezone.utc),
+                },
                 provider_response_id='chatcmpl-dd0af56b-f71d-4101-be2f-89efcf3f05ac',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -5312,6 +5380,7 @@ async def get_something_by_name(name: str) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 instructions='Be concise. Never use pretty double quotes, just regular ones.',
                 run_id=IsStr(),
             ),
@@ -5351,6 +5420,7 @@ async def get_something_by_name(name: str) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 instructions='Be concise. Never use pretty double quotes, just regular ones.',
                 run_id=IsStr(),
             ),
@@ -5369,7 +5439,10 @@ async def get_something_by_name(name: str) -> str:
                 model_name='openai/gpt-oss-120b',
                 timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': datetime(2025, 9, 2, 21, 3, 54, tzinfo=timezone.utc),
+                },
                 provider_response_id=IsStr(),
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -5383,6 +5456,7 @@ async def get_something_by_name(name: str) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 instructions='Be concise. Never use pretty double quotes, just regular ones.',
                 run_id=IsStr(),
             ),
@@ -5397,7 +5471,10 @@ async def get_something_by_name(name: str) -> str:
                 model_name='openai/gpt-oss-120b',
                 timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2025, 9, 2, 21, 3, 57, tzinfo=timezone.utc),
+                },
                 provider_response_id=IsStr(),
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -5433,6 +5510,7 @@ async def get_something_by_name(name: str) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 instructions='Be concise. Never use pretty double quotes, just regular ones.',
                 run_id=IsStr(),
             ),
@@ -5454,6 +5532,7 @@ async def get_something_by_name(name: str) -> str:
                 model_name='openai/gpt-oss-120b',
                 timestamp=IsDatetime(),
                 provider_name='groq',
+                provider_details={'timestamp': datetime(2025, 9, 2, 21, 23, 3, tzinfo=timezone.utc)},
                 provider_response_id='chatcmpl-4e0ca299-7515-490a-a98a-16d7664d4fba',
                 run_id=IsStr(),
             ),
@@ -5479,6 +5558,7 @@ async def get_something_by_name(name: str) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 instructions='Be concise. Never use pretty double quotes, just regular ones.',
                 run_id=IsStr(),
             ),
@@ -5495,7 +5575,10 @@ async def get_something_by_name(name: str) -> str:
                 model_name='openai/gpt-oss-120b',
                 timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': datetime(2025, 9, 2, 21, 23, 4, tzinfo=timezone.utc),
+                },
                 provider_response_id='chatcmpl-fffa1d41-1763-493a-9ced-083bd3f2d98b',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -5509,6 +5592,7 @@ async def get_something_by_name(name: str) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 instructions='Be concise. Never use pretty double quotes, just regular ones.',
                 run_id=IsStr(),
             ),
@@ -5518,7 +5602,10 @@ async def get_something_by_name(name: str) -> str:
                 model_name='openai/gpt-oss-120b',
                 timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2025, 9, 2, 21, 23, 4, tzinfo=timezone.utc),
+                },
                 provider_response_id='chatcmpl-fe6b5685-166f-4c71-9cd7-3d5a97301bf1',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -5560,6 +5647,7 @@ class CityLocation(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -5573,7 +5661,10 @@ class CityLocation(BaseModel):
                 model_name='openai/gpt-oss-120b',
                 timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2025, 9, 2, 20, 1, 5, tzinfo=timezone.utc),
+                },
                 provider_response_id=IsStr(),
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -5603,6 +5694,7 @@ class CityLocation(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -5616,7 +5708,10 @@ class CityLocation(BaseModel):
                 model_name='openai/gpt-oss-120b',
                 timestamp=IsDatetime(),
                 provider_name='groq',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2025, 9, 2, 20, 1, 6, tzinfo=timezone.utc),
+                },
                 provider_response_id=IsStr(),
                 finish_reason='stop',
                 run_id=IsStr(),
diff --git a/tests/models/test_huggingface.py b/tests/models/test_huggingface.py
index 56d74ed619..6d0ca4a010 100644
--- a/tests/models/test_huggingface.py
+++ b/tests/models/test_huggingface.py
@@ -167,9 +167,12 @@ async def test_simple_completion(allow_model_requests: None, huggingface_api_key
             ],
             usage=RequestUsage(input_tokens=30, output_tokens=29),
             model_name='Qwen/Qwen2.5-72B-Instruct-fast',
-            timestamp=datetime(2025, 7, 8, 13, 42, 33, tzinfo=timezone.utc),
+            timestamp=IsNow(tz=timezone.utc),
             provider_name='huggingface',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2025, 7, 8, 13, 42, 33, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-d445c0d473a84791af2acf356cc00df7',
             run_id=IsStr(),
         )
@@ -237,9 +240,12 @@ async def test_request_structured_response(
             )
         ],
             model_name='hf-model',
-            timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc),
+            timestamp=IsNow(tz=timezone.utc),
             provider_name='huggingface',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+            },
             provider_response_id='123',
             run_id=IsStr(),
         )
@@ -361,6 +367,7 @@ async def get_location(loc_name: str) -> str:
                 SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                 UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -373,9 +380,12 @@ async def get_location(loc_name: str) -> str:
             ],
             usage=RequestUsage(input_tokens=1, output_tokens=1),
             model_name='hf-model',
-            timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+            timestamp=IsNow(tz=timezone.utc),
             provider_name='huggingface',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+            },
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -388,6 +398,7 @@ async def get_location(loc_name: str) -> str:
                     timestamp=IsNow(tz=timezone.utc),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -400,9 +411,12 @@ async def get_location(loc_name: str) -> str:
             ],
             usage=RequestUsage(input_tokens=2, output_tokens=1),
             model_name='hf-model',
-            timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+            timestamp=IsNow(tz=timezone.utc),
             provider_name='huggingface',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+            },
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -415,14 +429,18 @@ async def get_location(loc_name: str) -> str:
                     timestamp=IsNow(tz=timezone.utc),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='final response')],
             model_name='hf-model',
-            timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+            timestamp=IsNow(tz=timezone.utc),
             provider_name='huggingface',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+            },
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -640,15 +658,19 @@ async def test_image_url_input(allow_model_requests: None, huggingface_api_key:
                     timestamp=IsNow(tz=timezone.utc),
                )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='Hello! How can I assist you with this image of a potato?')],
             usage=RequestUsage(input_tokens=269, output_tokens=15),
             model_name='Qwen/Qwen2.5-VL-72B-Instruct',
-            timestamp=datetime(2025, 7, 8, 14, 4, 39, tzinfo=timezone.utc),
+            timestamp=IsNow(tz=timezone.utc),
             provider_name='huggingface',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2025, 7, 8, 14, 4, 39, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-49aa100effab4ca28514d5ccc00d7944',
             run_id=IsStr(),
         ),
@@ -709,6 +731,7 @@ def simple_instructions(ctx: RunContext):
         [
             ModelRequest(
                 parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 instructions='You are a helpful assistant.',
                 run_id=IsStr(),
             ),
@@ -718,7 +741,10 @@ def simple_instructions(ctx: RunContext):
                 model_name='Qwen/Qwen2.5-72B-Instruct-fast',
                 timestamp=IsDatetime(),
                 provider_name='huggingface',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2025, 7, 2, 15, 39, 17, tzinfo=timezone.utc),
+                },
                 provider_response_id='chatcmpl-b3936940372c481b8d886e596dc75524',
                 run_id=IsStr(),
             ),
@@ -807,14 +833,18 @@ def response_validator(value: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='invalid-response')],
                 model_name='hf-model',
-                timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                timestamp=IsNow(tz=timezone.utc),
                 provider_name='huggingface',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                },
                 provider_response_id='123',
                 run_id=IsStr(),
             ),
@@ -827,14 +857,18 @@ def response_validator(value: str) -> str:
                     timestamp=IsNow(tz=timezone.utc),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
             parts=[TextPart(content='final-response')],
             model_name='hf-model',
-            timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+
timestamp=IsNow(tz=timezone.utc), provider_name='huggingface', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', run_id=IsStr(), ), @@ -858,7 +892,7 @@ async def test_thinking_part_in_history(allow_model_requests: None): model = HuggingFaceModel('hf-model', provider=HuggingFaceProvider(hf_client=mock_client, api_key='x')) agent = Agent(model) messages = [ - ModelRequest(parts=[UserPromptPart(content='request')]), + ModelRequest(parts=[UserPromptPart(content='request')], timestamp=IsDatetime()), ModelResponse( parts=[ TextPart(content='text 1'), @@ -925,6 +959,7 @@ async def test_hf_model_thinking_part(allow_model_requests: None, huggingface_ap [ ModelRequest( parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -936,7 +971,10 @@ async def test_hf_model_thinking_part(allow_model_requests: None, huggingface_ap model_name='Qwen/Qwen3-235B-A22B', timestamp=IsDatetime(), provider_name='huggingface', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 7, 9, 13, 17, 45, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-957db61fe60d4440bcfe1f11f2c5b4b9', run_id=IsStr(), ), @@ -959,6 +997,7 @@ async def test_hf_model_thinking_part(allow_model_requests: None, huggingface_ap timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -970,7 +1009,10 @@ async def test_hf_model_thinking_part(allow_model_requests: None, huggingface_ap model_name='Qwen/Qwen3-235B-A22B', timestamp=IsDatetime(), provider_name='huggingface', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 7, 9, 13, 18, 14, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-35fdec1307634f94a39f7e26f52e12a7', run_id=IsStr(), ), @@ -1000,6 +1042,7 @@ async def test_hf_model_thinking_part_iter(allow_model_requests: None, huggingfa timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1010,7 +1053,10 @@ async def test_hf_model_thinking_part_iter(allow_model_requests: None, huggingfa model_name='Qwen/Qwen3-235B-A22B', timestamp=IsDatetime(), provider_name='huggingface', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 7, 23, 19, 58, 41, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-357f347a3f5d4897b36a128fb4e4cf7b', run_id=IsStr(), ), diff --git a/tests/models/test_instrumented.py b/tests/models/test_instrumented.py index 651a5b6a41..f119678219 100644 --- a/tests/models/test_instrumented.py +++ b/tests/models/test_instrumented.py @@ -45,7 +45,7 @@ from pydantic_ai.settings import ModelSettings from pydantic_ai.usage import RequestUsage -from ..conftest import IsInt, IsStr, try_import +from ..conftest import IsDatetime, IsInt, IsStr, try_import with try_import() as imports_successful: from logfire.testing import CaptureLogfire @@ -151,7 +151,8 @@ async def test_instrumented_model(capfire: CaptureLogfire): RetryPromptPart('retry_prompt1', tool_name='tool4', tool_call_id='tool_call_4'), RetryPromptPart('retry_prompt2'), {}, # test unexpected parts # type: ignore - ] + ], + timestamp=IsDatetime(), ), ModelResponse(parts=[TextPart('text3')]), ] @@ -362,7 +363,7 @@ async def 
test_instrumented_model_not_recording(): InstrumentationSettings(tracer_provider=NoOpTracerProvider(), event_logger_provider=NoOpEventLoggerProvider()), ) - messages: list[ModelMessage] = [ModelRequest(parts=[SystemPromptPart('system_prompt')])] + messages: list[ModelMessage] = [ModelRequest(parts=[SystemPromptPart('system_prompt')], timestamp=IsDatetime())] await model.request( messages, model_settings=ModelSettings(temperature=1), @@ -384,7 +385,8 @@ async def test_instrumented_model_stream(capfire: CaptureLogfire): ModelRequest( parts=[ UserPromptPart('user_prompt'), - ] + ], + timestamp=IsDatetime(), ), ] async with model.request_stream( @@ -487,7 +489,8 @@ async def test_instrumented_model_stream_break(capfire: CaptureLogfire): ModelRequest( parts=[ UserPromptPart('user_prompt'), - ] + ], + timestamp=IsDatetime(), ), ] @@ -611,6 +614,7 @@ async def test_instrumented_model_attributes_mode(capfire: CaptureLogfire, instr RetryPromptPart('retry_prompt2'), {}, # test unexpected parts # type: ignore ], + timestamp=IsDatetime(), ), ModelResponse(parts=[TextPart('text3')]), ] @@ -991,7 +995,7 @@ def __repr__(self): messages = [ ModelResponse(parts=[ToolCallPart('tool', {'arg': Foo()}, tool_call_id='tool_call_id')]), - ModelRequest(parts=[ToolReturnPart('tool', Bar(), tool_call_id='return_tool_call_id')]), + ModelRequest(parts=[ToolReturnPart('tool', Bar(), tool_call_id='return_tool_call_id')], timestamp=IsDatetime()), ] settings = InstrumentationSettings() @@ -1030,7 +1034,7 @@ def __repr__(self): def test_messages_to_otel_events_instructions(): messages = [ - ModelRequest(instructions='instructions', parts=[UserPromptPart('user_prompt')]), + ModelRequest(instructions='instructions', parts=[UserPromptPart('user_prompt')], timestamp=IsDatetime()), ModelResponse(parts=[TextPart('text1')]), ] settings = InstrumentationSettings() @@ -1056,9 +1060,9 @@ def test_messages_to_otel_events_instructions(): def test_messages_to_otel_events_instructions_multiple_messages(): messages = [ - ModelRequest(instructions='instructions', parts=[UserPromptPart('user_prompt')]), + ModelRequest(instructions='instructions', parts=[UserPromptPart('user_prompt')], timestamp=IsDatetime()), ModelResponse(parts=[TextPart('text1')]), - ModelRequest(instructions='instructions2', parts=[UserPromptPart('user_prompt2')]), + ModelRequest(instructions='instructions2', parts=[UserPromptPart('user_prompt2')], timestamp=IsDatetime()), ] settings = InstrumentationSettings() assert [InstrumentedModel.event_to_dict(e) for e in settings.messages_to_otel_events(messages)] == snapshot( @@ -1085,10 +1089,22 @@ def test_messages_to_otel_events_instructions_multiple_messages(): def test_messages_to_otel_events_image_url(document_content: BinaryContent): messages = [ - ModelRequest(parts=[UserPromptPart(content=['user_prompt', ImageUrl('https://example.com/image.png')])]), - ModelRequest(parts=[UserPromptPart(content=['user_prompt2', AudioUrl('https://example.com/audio.mp3')])]), - ModelRequest(parts=[UserPromptPart(content=['user_prompt3', DocumentUrl('https://example.com/document.pdf')])]), - ModelRequest(parts=[UserPromptPart(content=['user_prompt4', VideoUrl('https://example.com/video.mp4')])]), + ModelRequest( + parts=[UserPromptPart(content=['user_prompt', ImageUrl('https://example.com/image.png')])], + timestamp=IsDatetime(), + ), + ModelRequest( + parts=[UserPromptPart(content=['user_prompt2', AudioUrl('https://example.com/audio.mp3')])], + timestamp=IsDatetime(), + ), + ModelRequest( + parts=[UserPromptPart(content=['user_prompt3', 
DocumentUrl('https://example.com/document.pdf')])], + timestamp=IsDatetime(), + ), + ModelRequest( + parts=[UserPromptPart(content=['user_prompt4', VideoUrl('https://example.com/video.mp4')])], + timestamp=IsDatetime(), + ), ModelRequest( parts=[ UserPromptPart( @@ -1100,9 +1116,10 @@ def test_messages_to_otel_events_image_url(document_content: BinaryContent): VideoUrl('https://example.com/video2.mp4'), ] ) - ] + ], + timestamp=IsDatetime(), ), - ModelRequest(parts=[UserPromptPart(content=['user_prompt6', document_content])]), + ModelRequest(parts=[UserPromptPart(content=['user_prompt6', document_content])], timestamp=IsDatetime()), ModelResponse(parts=[TextPart('text1')]), ModelResponse(parts=[FilePart(content=document_content)]), ] @@ -1242,7 +1259,7 @@ def test_messages_to_otel_events_image_url(document_content: BinaryContent): def test_messages_to_otel_events_without_binary_content(document_content: BinaryContent): messages: list[ModelMessage] = [ - ModelRequest(parts=[UserPromptPart(content=['user_prompt6', document_content])]), + ModelRequest(parts=[UserPromptPart(content=['user_prompt6', document_content])], timestamp=IsDatetime()), ] settings = InstrumentationSettings(include_binary_content=False) assert [InstrumentedModel.event_to_dict(e) for e in settings.messages_to_otel_events(messages)] == snapshot( @@ -1270,7 +1287,7 @@ def test_messages_to_otel_events_without_binary_content(document_content: Binary def test_messages_without_content(document_content: BinaryContent): messages: list[ModelMessage] = [ - ModelRequest(parts=[SystemPromptPart('system_prompt')]), + ModelRequest(parts=[SystemPromptPart('system_prompt')], timestamp=IsDatetime()), ModelResponse(parts=[TextPart('text1')]), ModelRequest( parts=[ @@ -1284,13 +1301,17 @@ def test_messages_without_content(document_content: BinaryContent): document_content, ] ) - ] + ], + timestamp=IsDatetime(), ), ModelResponse(parts=[TextPart('text2'), ToolCallPart(tool_name='my_tool', args={'a': 13, 'b': 4})]), - ModelRequest(parts=[ToolReturnPart('tool', 'tool_return_content', 'tool_call_1')]), - ModelRequest(parts=[RetryPromptPart('retry_prompt', tool_name='tool', tool_call_id='tool_call_2')]), - ModelRequest(parts=[UserPromptPart(content=['user_prompt2', document_content])]), - ModelRequest(parts=[UserPromptPart('simple text prompt')]), + ModelRequest(parts=[ToolReturnPart('tool', 'tool_return_content', 'tool_call_1')], timestamp=IsDatetime()), + ModelRequest( + parts=[RetryPromptPart('retry_prompt', tool_name='tool', tool_call_id='tool_call_2')], + timestamp=IsDatetime(), + ), + ModelRequest(parts=[UserPromptPart(content=['user_prompt2', document_content])], timestamp=IsDatetime()), + ModelRequest(parts=[UserPromptPart('simple text prompt')], timestamp=IsDatetime()), ModelResponse(parts=[FilePart(content=document_content)]), ] settings = InstrumentationSettings(include_content=False) @@ -1464,7 +1485,7 @@ def test_deprecated_event_mode_warning(): async def test_response_cost_error(capfire: CaptureLogfire, monkeypatch: pytest.MonkeyPatch): model = InstrumentedModel(MyModel()) - messages: list[ModelMessage] = [ModelRequest(parts=[UserPromptPart('user_prompt')])] + messages: list[ModelMessage] = [ModelRequest(parts=[UserPromptPart('user_prompt')], timestamp=IsDatetime())] monkeypatch.setattr(ModelResponse, 'cost', None) with warns( @@ -1623,7 +1644,9 @@ def test_cache_point_in_user_prompt(): OpenTelemetry message parts output. 
""" messages: list[ModelMessage] = [ - ModelRequest(parts=[UserPromptPart(content=['text before', CachePoint(), 'text after'])]), + ModelRequest( + parts=[UserPromptPart(content=['text before', CachePoint(), 'text after'])], timestamp=IsDatetime() + ), ] settings = InstrumentationSettings() @@ -1645,7 +1668,8 @@ def test_cache_point_in_user_prompt(): ModelRequest( parts=[ UserPromptPart(content=['first', CachePoint(), 'second', CachePoint(), 'third']), - ] + ], + timestamp=IsDatetime(), ), ] assert settings.messages_to_otel_messages(messages_multi) == snapshot( @@ -1674,7 +1698,8 @@ def test_cache_point_in_user_prompt(): 'question', ] ), - ] + ], + timestamp=IsDatetime(), ), ] assert settings.messages_to_otel_messages(messages_mixed) == snapshot( diff --git a/tests/models/test_mcp_sampling.py b/tests/models/test_mcp_sampling.py index 1da0851c20..c4094ce818 100644 --- a/tests/models/test_mcp_sampling.py +++ b/tests/models/test_mcp_sampling.py @@ -11,7 +11,7 @@ from pydantic_ai.agent import Agent from pydantic_ai.exceptions import UnexpectedModelBehavior -from ..conftest import IsNow, IsStr, try_import +from ..conftest import IsDatetime, IsNow, IsStr, try_import with try_import() as imports_successful: from mcp import CreateMessageResult @@ -55,6 +55,7 @@ def test_assistant_text(): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -92,6 +93,7 @@ def test_assistant_text_history(): [ ModelRequest( parts=[UserPromptPart(content='1', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), instructions='testing', run_id=IsStr(), ), @@ -103,6 +105,7 @@ def test_assistant_text_history(): ), ModelRequest( parts=[UserPromptPart(content='2', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), instructions='testing', run_id=IsStr(), ), @@ -125,7 +128,8 @@ def test_assistant_text_history_complex(): content=['a string', BinaryContent(data=base64.b64encode(b'data'), media_type='image/jpeg')] ), SystemPromptPart(content='system content'), - ] + ], + timestamp=IsDatetime(), ), ModelResponse( parts=[TextPart(content='text content')], diff --git a/tests/models/test_mistral.py b/tests/models/test_mistral.py index 32a56c6395..bc6e50eddd 100644 --- a/tests/models/test_mistral.py +++ b/tests/models/test_mistral.py @@ -215,6 +215,7 @@ async def test_multiple_completions(allow_model_requests: None): [ ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -230,15 +231,19 @@ async def test_multiple_completions(allow_model_requests: None): ), ModelRequest( parts=[UserPromptPart(content='hello again', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='hello again')], usage=RequestUsage(input_tokens=1, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -279,45 +284,57 @@ async def test_three_completions(allow_model_requests: None): [ ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( 
parts=[TextPart(content='world')], usage=RequestUsage(input_tokens=1, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), ), ModelRequest( parts=[UserPromptPart(content='hello again', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='hello again')], usage=RequestUsage(input_tokens=1, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), ), ModelRequest( parts=[UserPromptPart(content='final message', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='final message')], usage=RequestUsage(input_tokens=1, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -426,6 +443,7 @@ class CityLocation(BaseModel): [ ModelRequest( parts=[UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -438,9 +456,12 @@ class CityLocation(BaseModel): ], usage=RequestUsage(input_tokens=1, output_tokens=2), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -454,6 +475,7 @@ class CityLocation(BaseModel): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -494,6 +516,7 @@ class CityLocation(BaseModel): [ ModelRequest( parts=[UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -506,9 +529,12 @@ class CityLocation(BaseModel): ], usage=RequestUsage(input_tokens=1, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -522,6 +548,7 @@ class CityLocation(BaseModel): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -559,6 +586,7 @@ async def 
test_request_output_type_with_arguments_str_response(allow_model_reque SystemPromptPart(content='System prompt value', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -571,9 +599,12 @@ async def test_request_output_type_with_arguments_str_response(allow_model_reque ], usage=RequestUsage(input_tokens=1, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -587,6 +618,7 @@ async def test_request_output_type_with_arguments_str_response(allow_model_reque timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -1114,6 +1146,7 @@ async def get_location(loc_name: str) -> str: SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1126,9 +1159,12 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=2, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -1142,6 +1178,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1154,9 +1191,12 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=3, output_tokens=2), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -1170,15 +1210,19 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='final response')], usage=RequestUsage(input_tokens=1, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -1271,6 +1315,7 @@ async def get_location(loc_name: str) -> str: SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1283,9 +1328,12 @@ async def get_location(loc_name: str) -> 
str: ], usage=RequestUsage(input_tokens=2, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -1299,6 +1347,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1311,9 +1360,12 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=3, output_tokens=2), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -1327,6 +1379,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1339,9 +1392,12 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=2, output_tokens=1), model_name='mistral-large-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -1355,6 +1411,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] @@ -1418,7 +1475,7 @@ async def get_location(loc_name: str) -> str: v = [c async for c in result.stream_output(debounce_by=None)] assert v == snapshot([{'won': True}]) assert result.is_complete - assert result.timestamp() == datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc) + assert result.timestamp() == IsNow(tz=timezone.utc) assert result.usage().input_tokens == 4 assert result.usage().output_tokens == 4 @@ -1432,6 +1489,7 @@ async def get_location(loc_name: str) -> str: SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -1444,9 +1502,12 @@ async def get_location(loc_name: str) -> str: ], usage=RequestUsage(input_tokens=2, output_tokens=2), model_name='gpt-4', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='mistral', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='x', finish_reason='tool_call', run_id=IsStr(), @@ -1460,15 +1521,19 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[ToolCallPart(tool_name='final_result', args='{"won": true}', tool_call_id='1')], usage=RequestUsage(input_tokens=2, output_tokens=2), model_name='gpt-4', - 
-                timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                timestamp=IsNow(tz=timezone.utc),
                 provider_name='mistral',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                },
                 provider_response_id='x',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1482,6 +1547,7 @@ async def get_location(loc_name: str) -> str:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -1532,7 +1598,7 @@ async def get_location(loc_name: str) -> str:
         v = [c async for c in result.stream_output(debounce_by=None)]
         assert v == snapshot(['final ', 'final response'])
         assert result.is_complete
-        assert result.timestamp() == datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)
+        assert result.timestamp() == IsNow(tz=timezone.utc)
         assert result.usage().input_tokens == 6
         assert result.usage().output_tokens == 6
@@ -1546,6 +1612,7 @@ async def get_location(loc_name: str) -> str:
                     SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                     UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc)),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1558,9 +1625,12 @@ async def get_location(loc_name: str) -> str:
                 ],
                 usage=RequestUsage(input_tokens=2, output_tokens=2),
                 model_name='gpt-4',
-                timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                timestamp=IsNow(tz=timezone.utc),
                 provider_name='mistral',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                },
                 provider_response_id='x',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1574,15 +1644,19 @@ async def get_location(loc_name: str) -> str:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='final response')],
                 usage=RequestUsage(input_tokens=4, output_tokens=4),
                 model_name='gpt-4',
-                timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                timestamp=IsNow(tz=timezone.utc),
                 provider_name='mistral',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                },
                 provider_response_id='x',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1648,7 +1722,7 @@ async def get_location(loc_name: str) -> str:
         v = [c async for c in result.stream_text(debounce_by=None)]
         assert v == snapshot(['final ', 'final response'])
         assert result.is_complete
-        assert result.timestamp() == datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)
+        assert result.timestamp() == IsNow(tz=timezone.utc)
         assert result.usage().input_tokens == 7
         assert result.usage().output_tokens == 7
@@ -1662,6 +1736,7 @@ async def get_location(loc_name: str) -> str:
                     SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)),
                     UserPromptPart(content='User prompt value', timestamp=IsNow(tz=timezone.utc)),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1674,9 +1749,12 @@ async def get_location(loc_name: str) -> str:
                 ],
                 usage=RequestUsage(input_tokens=2, output_tokens=2),
                 model_name='gpt-4',
-                timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                timestamp=IsNow(tz=timezone.utc),
                 provider_name='mistral',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                },
                 provider_response_id='x',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1690,6 +1768,7 @@ async def get_location(loc_name: str) -> str:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1702,9 +1781,12 @@ async def get_location(loc_name: str) -> str:
                 ],
                 usage=RequestUsage(input_tokens=1, output_tokens=1),
                 model_name='gpt-4',
-                timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                timestamp=IsNow(tz=timezone.utc),
                 provider_name='mistral',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                },
                 provider_response_id='x',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1718,15 +1800,19 @@ async def get_location(loc_name: str) -> str:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
                 parts=[TextPart(content='final response')],
                 usage=RequestUsage(input_tokens=4, output_tokens=4),
                 model_name='gpt-4',
-                timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                timestamp=IsNow(tz=timezone.utc),
                 provider_name='mistral',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                },
                 provider_response_id='x',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1898,6 +1984,7 @@ async def get_image() -> BinaryContent:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1906,7 +1993,10 @@ async def get_image() -> BinaryContent:
                 model_name='pixtral-12b-latest',
                 timestamp=IsDatetime(),
                 provider_name='mistral',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': datetime(2025, 11, 28, 2, 19, 58, tzinfo=timezone.utc),
+                },
                 provider_response_id='412174432ea945889703eac58b44ae35',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1927,6 +2017,7 @@ async def get_image() -> BinaryContent:
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1939,7 +2030,10 @@ async def get_image() -> BinaryContent:
                 model_name='pixtral-12b-latest',
                 timestamp=IsDatetime(),
                 provider_name='mistral',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2025, 11, 28, 2, 20, 5, tzinfo=timezone.utc),
+                },
                 provider_response_id='049b5c7704554d3396e727a95cb6d947',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1975,6 +2069,7 @@ async def test_image_url_input(allow_model_requests: None):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1983,7 +2078,10 @@ async def test_image_url_input(allow_model_requests: None):
                 model_name='mistral-large-123',
                 timestamp=IsDatetime(),
                 provider_name='mistral',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc),
+                },
                 provider_response_id='123',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -2016,6 +2114,7 @@ async def test_image_as_binary_content_input(allow_model_requests: None):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2024,7 +2123,10 @@ async def test_image_as_binary_content_input(allow_model_requests: None):
                 model_name='mistral-large-123',
                 timestamp=IsDatetime(),
                 provider_name='mistral',
provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -2060,6 +2162,7 @@ async def test_pdf_url_input(allow_model_requests: None): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2068,7 +2171,10 @@ async def test_pdf_url_input(allow_model_requests: None): model_name='mistral-large-123', timestamp=IsDatetime(), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -2098,6 +2204,7 @@ async def test_pdf_as_binary_content_input(allow_model_requests: None): timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2106,7 +2213,10 @@ async def test_pdf_as_binary_content_input(allow_model_requests: None): model_name='mistral-large-123', timestamp=IsDatetime(), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -2193,6 +2303,7 @@ async def test_mistral_model_instructions(allow_model_requests: None, mistral_ap [ ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -2202,7 +2313,10 @@ async def test_mistral_model_instructions(allow_model_requests: None, mistral_ap model_name='mistral-large-123', timestamp=IsDatetime(), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -2222,6 +2336,7 @@ async def test_mistral_model_thinking_part(allow_model_requests: None, openai_ap [ ModelRequest( parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2240,7 +2355,10 @@ async def test_mistral_model_thinking_part(allow_model_requests: None, openai_ap model_name='o3-mini-2025-01-31', timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'completed'}, + provider_details={ + 'finish_reason': 'completed', + 'timestamp': datetime(2025, 9, 5, 22, 29, 38, tzinfo=timezone.utc), + }, provider_response_id='resp_68bb6452990081968f5aff503a55e3b903498c8aa840cf12', finish_reason='stop', run_id=IsStr(), @@ -2263,6 +2381,7 @@ async def test_mistral_model_thinking_part(allow_model_requests: None, openai_ap timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2274,7 +2393,10 @@ async def test_mistral_model_thinking_part(allow_model_requests: None, openai_ap model_name='magistral-medium-latest', timestamp=IsDatetime(), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 9, 5, 22, 30, tzinfo=timezone.utc), + }, provider_response_id='9abe8b736bff46af8e979b52334a57cd', finish_reason='stop', run_id=IsStr(), @@ -2305,6 +2427,7 @@ async def 
test_mistral_model_thinking_part_iter(allow_model_requests: None, mist timestamp=IsDatetime(), ) ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -2338,7 +2461,10 @@ async def test_mistral_model_thinking_part_iter(allow_model_requests: None, mist model_name='magistral-medium-latest', timestamp=IsDatetime(), provider_name='mistral', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 11, 28, 2, 19, 53, tzinfo=timezone.utc), + }, provider_response_id='9f9d90210f194076abeee223863eaaf0', finish_reason='stop', run_id=IsStr(), diff --git a/tests/models/test_model_function.py b/tests/models/test_model_function.py index 196b140454..f774682490 100644 --- a/tests/models/test_model_function.py +++ b/tests/models/test_model_function.py @@ -27,7 +27,7 @@ from pydantic_ai.result import RunUsage from pydantic_ai.usage import RequestUsage -from ..conftest import IsNow, IsStr +from ..conftest import IsDatetime, IsNow, IsStr pytestmark = pytest.mark.anyio @@ -68,6 +68,7 @@ def test_simple(): [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -86,6 +87,7 @@ def test_simple(): [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -97,6 +99,7 @@ def test_simple(): ), ModelRequest( parts=[UserPromptPart(content='World', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -167,6 +170,7 @@ def test_weather(): [ ModelRequest( parts=[UserPromptPart(content='London', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -189,6 +193,7 @@ def test_weather(): tool_call_id=IsStr(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -207,6 +212,7 @@ def test_weather(): tool_call_id=IsStr(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -371,6 +377,7 @@ def test_call_all(): SystemPromptPart(content='foobar', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -404,6 +411,7 @@ def test_call_all(): tool_name='quz', content='a', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr() ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -477,6 +485,7 @@ async def test_stream_text(): [ ModelRequest( parts=[UserPromptPart(content='', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( diff --git a/tests/models/test_model_test.py b/tests/models/test_model_test.py index f7a6809a71..ae357f481c 100644 --- a/tests/models/test_model_test.py +++ b/tests/models/test_model_test.py @@ -34,7 +34,7 @@ from pydantic_ai.models.test import TestModel, _chars, _JsonSchemaTestData # pyright: ignore[reportPrivateUsage] from pydantic_ai.usage import RequestUsage, RunUsage -from ..conftest import IsNow, IsStr +from ..conftest import IsDatetime, IsNow, IsStr def test_call_one(): @@ -78,6 +78,7 @@ def test_custom_output_args(): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -102,6 +103,7 @@ def test_custom_output_args(): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -125,6 +127,7 @@ class Foo(BaseModel): timestamp=IsNow(tz=timezone.utc), ) ], + 
timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -149,6 +152,7 @@ class Foo(BaseModel): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -168,6 +172,7 @@ def test_output_type(): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -192,6 +197,7 @@ def test_output_type(): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -218,6 +224,7 @@ async def my_ret(x: int) -> str: [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -236,6 +243,7 @@ async def my_ret(x: int) -> str: tool_call_id=IsStr(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -251,6 +259,7 @@ async def my_ret(x: int) -> str: tool_name='my_ret', content='1', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc) ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( diff --git a/tests/models/test_openai.py b/tests/models/test_openai.py index ed68edd94f..e1c4983d4b 100644 --- a/tests/models/test_openai.py +++ b/tests/models/test_openai.py @@ -125,28 +125,36 @@ async def test_request_simple_success(allow_model_requests: None): [ ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='world')], model_name='gpt-4o-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='openai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), ), ModelRequest( parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='world')], model_name='gpt-4o-123', - timestamp=datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + timestamp=IsNow(tz=timezone.utc), provider_name='openai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -193,6 +201,41 @@ async def test_request_simple_usage(allow_model_requests: None): ) +async def test_response_with_created_timestamp_but_no_provider_details(allow_model_requests: None): + class MinimalOpenAIChatModel(OpenAIChatModel): + def _process_provider_details(self, response: chat.ChatCompletion) -> dict[str, Any] | None: + return None + + c = completion_message(ChatCompletionMessage(content='world', role='assistant')) + mock_client = MockOpenAI.create_mock(c) + m = MinimalOpenAIChatModel('gpt-4o', provider=OpenAIProvider(openai_client=mock_client)) + agent = Agent(m) + + result = await agent.run('hello') + assert result.output == 'world' + assert result.all_messages() == snapshot( + [ + ModelRequest( + parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsNow(tz=timezone.utc), + run_id=IsStr(), + ), + ModelResponse( + parts=[TextPart(content='world')], + model_name='gpt-4o-123', + timestamp=IsNow(tz=timezone.utc), + provider_name='openai', + provider_details={ + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, + provider_response_id='123', + 
finish_reason='stop', + run_id=IsStr(), + ), + ] + ) + + async def test_openai_chat_image_detail_vendor_metadata(allow_model_requests: None): c = completion_message( ChatCompletionMessage(content='done', role='assistant'), @@ -238,6 +281,7 @@ async def test_request_structured_response(allow_model_requests: None): [ ModelRequest( parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -249,9 +293,12 @@ async def test_request_structured_response(allow_model_requests: None): ) ], model_name='gpt-4o-123', - timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc), + timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -265,6 +312,7 @@ async def test_request_structured_response(allow_model_requests: None): timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ] @@ -333,6 +381,7 @@ async def get_location(loc_name: str) -> str: SystemPromptPart(content='this is the system prompt', timestamp=IsNow(tz=timezone.utc)), UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -349,9 +398,12 @@ async def get_location(loc_name: str) -> str: output_tokens=1, ), model_name='gpt-4o-123', - timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc), + timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -365,6 +417,7 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -381,9 +434,12 @@ async def get_location(loc_name: str) -> str: output_tokens=2, ), model_name='gpt-4o-123', - timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc), + timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -397,14 +453,18 @@ async def get_location(loc_name: str) -> str: timestamp=IsNow(tz=timezone.utc), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( parts=[TextPart(content='final response')], model_name='gpt-4o-123', - timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc), + timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', run_id=IsStr(), @@ -477,7 +537,10 @@ async def test_stream_text_finish_reason(allow_model_requests: None): model_name='gpt-4o-123', timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc), + }, provider_response_id='123', finish_reason='stop', ) @@ -892,6 +955,7 @@ async def get_image() -> ImageUrl: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -909,7 +973,10 
@@ async def get_image() -> ImageUrl: model_name='gpt-4o-2024-08-06', timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': datetime(2025, 4, 29, 21, 7, 59, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-BRmTHlrARTzAHK1na9s80xDlQGYPX', finish_reason='tool_call', run_id=IsStr(), @@ -933,6 +1000,7 @@ async def get_image() -> ImageUrl: timestamp=IsDatetime(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -950,7 +1018,10 @@ async def get_image() -> ImageUrl: model_name='gpt-4o-2024-08-06', timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 4, 29, 21, 8, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-BRmTI0Y2zmkGw27kLarhsmiFQTGxR', finish_reason='stop', run_id=IsStr(), @@ -979,6 +1050,7 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -996,7 +1068,10 @@ async def get_image() -> BinaryContent: model_name='gpt-4o-2024-08-06', timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': datetime(2025, 4, 29, 20, 21, 33, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-BRlkLhPc87BdohVobEJJCGq3rUAG2', finish_reason='tool_call', run_id=IsStr(), @@ -1017,6 +1092,7 @@ async def get_image() -> BinaryContent: timestamp=IsDatetime(), ), ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1034,7 +1110,10 @@ async def get_image() -> BinaryContent: model_name='gpt-4o-2024-08-06', timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 4, 29, 20, 21, 36, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-BRlkORPA5rXMV3uzcOcgK4eQFKCVW', finish_reason='stop', run_id=IsStr(), @@ -1240,6 +1319,7 @@ async def test_message_history_can_start_with_model_response(allow_model_request timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -1257,7 +1337,10 @@ async def test_message_history_can_start_with_model_response(allow_model_request model_name='gpt-4.1-mini-2025-04-14', timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 11, 22, 10, 1, 40, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-Ceeiy4ivEE0hcL1EX5ZfLuW5xNUXB', finish_reason='stop', run_id=IsStr(), @@ -2044,6 +2127,7 @@ async def test_openai_instructions(allow_model_requests: None, openai_api_key: s [ ModelRequest( parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())], + timestamp=IsDatetime(), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -2062,7 +2146,10 @@ async def test_openai_instructions(allow_model_requests: None, openai_api_key: s model_name='gpt-4o-2024-08-06', timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 4, 7, 16, 30, 56, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-BJjf61mLb9z5H45ClJzbx0UWKwjo1', finish_reason='stop', run_id=IsStr(), @@ -2093,6 +2180,7 @@ async def 
get_temperature(city: str) -> float: [ ModelRequest( parts=[UserPromptPart(content='What is the temperature in Tokyo?', timestamp=IsDatetime())], + timestamp=IsDatetime(), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -2111,7 +2199,10 @@ async def get_temperature(city: str) -> float: model_name='gpt-4.1-mini-2025-04-14', timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'tool_calls'}, + provider_details={ + 'finish_reason': 'tool_calls', + 'timestamp': datetime(2025, 4, 16, 13, 37, 14, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-BMxEwRA0p0gJ52oKS7806KAlfMhqq', finish_reason='tool_call', run_id=IsStr(), @@ -2122,6 +2213,7 @@ async def get_temperature(city: str) -> float: tool_name='get_temperature', content=20.0, tool_call_id=IsStr(), timestamp=IsDatetime() ) ], + timestamp=IsDatetime(), instructions='You are a helpful assistant.', run_id=IsStr(), ), @@ -2140,7 +2232,10 @@ async def get_temperature(city: str) -> float: model_name='gpt-4.1-mini-2025-04-14', timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 4, 16, 13, 37, 15, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-BMxEx6B8JEj6oDC45MOWKp0phg8UP', finish_reason='stop', run_id=IsStr(), @@ -2160,6 +2255,7 @@ async def test_openai_model_thinking_part(allow_model_requests: None, openai_api [ ModelRequest( parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -2177,7 +2273,10 @@ async def test_openai_model_thinking_part(allow_model_requests: None, openai_api model_name='o3-mini-2025-01-31', timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'completed'}, + provider_details={ + 'finish_reason': 'completed', + 'timestamp': datetime(2025, 9, 10, 22, 21, 57, tzinfo=timezone.utc), + }, provider_response_id='resp_68c1fa0523248197888681b898567bde093f57e27128848a', finish_reason='stop', run_id=IsStr(), @@ -2199,6 +2298,7 @@ async def test_openai_model_thinking_part(allow_model_requests: None, openai_api timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -2216,7 +2316,10 @@ async def test_openai_model_thinking_part(allow_model_requests: None, openai_api model_name='o3-mini-2025-01-31', timestamp=IsDatetime(), provider_name='openai', - provider_details={'finish_reason': 'stop'}, + provider_details={ + 'finish_reason': 'stop', + 'timestamp': datetime(2025, 9, 10, 22, 22, 24, tzinfo=timezone.utc), + }, provider_response_id='chatcmpl-CENUmtwDD0HdvTUYL6lUeijDtxrZL', finish_reason='stop', run_id=IsStr(), @@ -2481,7 +2584,10 @@ def test_openai_response_timestamp_milliseconds(allow_model_requests: None): result = agent.run_sync('Hello') response = cast(ModelResponse, result.all_messages()[-1]) - assert response.timestamp == snapshot(datetime(2025, 6, 1, 3, 7, 48, tzinfo=timezone.utc)) + assert response.timestamp == IsNow(tz=timezone.utc) + assert response.provider_details == snapshot( + {'finish_reason': 'stop', 'timestamp': datetime(2025, 6, 1, 3, 7, 48, tzinfo=timezone.utc)} + ) async def test_openai_tool_output(allow_model_requests: None, openai_api_key: str): @@ -2509,6 +2615,7 @@ async def get_user_country() -> str: timestamp=IsDatetime(), ) ], + timestamp=IsDatetime(), run_id=IsStr(), ), ModelResponse( @@ -2526,7 +2633,10 @@ async def get_user_country() -> str: 
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'tool_calls'},
+            provider_details={
+                'finish_reason': 'tool_calls',
+                'timestamp': datetime(2025, 5, 1, 23, 36, 24, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-BSXk0dWkG4hfPt0lph4oFO35iT73I',
             finish_reason='tool_call',
             run_id=IsStr(),
@@ -2540,6 +2650,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2563,7 +2674,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'tool_calls'},
+            provider_details={
+                'finish_reason': 'tool_calls',
+                'timestamp': datetime(2025, 5, 1, 23, 36, 25, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-BSXk1xGHYzbhXgUkSutK08bdoNv5s',
             finish_reason='tool_call',
             run_id=IsStr(),
@@ -2577,6 +2691,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
     ]
@@ -2607,6 +2722,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2626,7 +2742,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'tool_calls'},
+            provider_details={
+                'finish_reason': 'tool_calls',
+                'timestamp': datetime(2025, 6, 9, 21, 20, 53, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-BgeDFS85bfHosRFEEAvq8reaCPCZ8',
             finish_reason='tool_call',
             run_id=IsStr(),
@@ -2640,6 +2759,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2657,7 +2777,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2025, 6, 9, 21, 20, 54, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-BgeDGX9eDyVrEI56aP2vtIHahBzFH',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2693,6 +2816,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2712,7 +2836,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'tool_calls'},
+            provider_details={
+                'finish_reason': 'tool_calls',
+                'timestamp': datetime(2025, 5, 1, 23, 36, 22, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-BSXjyBwGuZrtuuSzNCeaWMpGv2MZ3',
             finish_reason='tool_call',
             run_id=IsStr(),
@@ -2726,6 +2853,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2743,7 +2871,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2025, 5, 1, 23, 36, 23, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-BSXjzYGu67dhTy5r8KmjJvQ4HhDVO',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2781,6 +2912,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2800,7 +2932,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'tool_calls'},
+            provider_details={
+                'finish_reason': 'tool_calls',
+                'timestamp': datetime(2025, 6, 9, 23, 21, 26, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-Bgg5utuCSXMQ38j0n2qgfdQKcR9VD',
             finish_reason='tool_call',
             run_id=IsStr(),
@@ -2814,6 +2949,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2835,7 +2971,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2025, 6, 9, 23, 21, 27, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-Bgg5vrxUtCDlvgMreoxYxPaKxANmd',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2869,6 +3008,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2888,7 +3028,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'tool_calls'},
+            provider_details={
+                'finish_reason': 'tool_calls',
+                'timestamp': datetime(2025, 6, 10, 0, 21, 35, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-Bgh27PeOaFW6qmF04qC5uI2H9mviw',
             finish_reason='tool_call',
             run_id=IsStr(),
@@ -2902,6 +3045,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2919,7 +3063,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2025, 6, 10, 0, 21, 36, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-Bgh28advCSFhGHPnzUevVS6g6Uwg0',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2957,6 +3104,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2976,7 +3124,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'tool_calls'},
+            provider_details={
+                'finish_reason': 'tool_calls',
+                'timestamp': datetime(2025, 6, 10, 0, 21, 38, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-Bgh2AW2NXGgMc7iS639MJXNRgtatR',
             finish_reason='tool_call',
             run_id=IsStr(),
@@ -2990,6 +3141,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -3011,7 +3163,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'stop'},
+            provider_details={
+                'finish_reason': 'stop',
+                'timestamp': datetime(2025, 6, 10, 0, 21, 39, tzinfo=timezone.utc),
+            },
             provider_response_id='chatcmpl-Bgh2BthuopRnSqCuUgMbBnOqgkDHC',
             finish_reason='stop',
             run_id=IsStr(),
diff --git a/tests/models/test_openai_responses.py b/tests/models/test_openai_responses.py
index 47bdeaea3f..467c59281e 100644
--- a/tests/models/test_openai_responses.py
+++ b/tests/models/test_openai_responses.py
@@ -1,6 +1,7 @@
 import json
 import re
 from dataclasses import replace
+from datetime import datetime, timezone
 from typing import Any, cast
 
 import pytest
@@ -48,7 +49,7 @@
 from pydantic_ai.tools import ToolDefinition
 from pydantic_ai.usage import RequestUsage, RunUsage
 
-from ..conftest import IsBytes, IsDatetime, IsStr, TestEnv, try_import
+from ..conftest import IsBytes, IsDatetime, IsNow, IsStr, TestEnv, try_import
 from .mock_openai import MockOpenAIResponses, get_mock_responses_kwargs, response_message
 
 with try_import() as imports_successful:
@@ -252,6 +253,7 @@ async def get_location(loc_name: str) -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -273,7 +275,10 @@ async def get_location(loc_name: str) -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 3, 27, 12, 42, 44, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_67e547c48c9481918c5c4394464ce0c60ae6111e84dd5c08',
             finish_reason='stop',
             run_id=IsStr(),
@@ -293,6 +298,7 @@ async def get_location(loc_name: str) -> str:
                     timestamp=IsDatetime(),
                 ),
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -310,7 +316,10 @@ async def get_location(loc_name: str) -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 3, 27, 12, 42, 45, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_67e547c5a2f08191802a1f43620f348503a2086afed73b47',
             finish_reason='stop',
             run_id=IsStr(),
@@ -340,6 +349,7 @@ async def get_image() -> BinaryContent:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -355,7 +365,10 @@ async def get_image() -> BinaryContent:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 4, 29, 20, 21, 39, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_681134d3aa3481919ca581a267db1e510fe7a5a4e2123dc3',
             finish_reason='stop',
             run_id=IsStr(),
@@ -376,6 +389,7 @@ async def get_image() -> BinaryContent:
                     timestamp=IsDatetime(),
                 ),
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -389,7 +403,10 @@ async def get_image() -> BinaryContent:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 4, 29, 20, 21, 41, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_681134d53c48819198ce7b89db78dffd02cbfeaababb040c',
             finish_reason='stop',
             run_id=IsStr(),
@@ -491,7 +508,10 @@ async def get_capital(country: str) -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 3, 27, 13, 37, 38, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_67e554a21aa88191b65876ac5e5bbe0406c52f0e511c76ed',
             finish_reason='stop',
         )
@@ -525,6 +545,7 @@ async def test_openai_responses_model_builtin_tools_web_search(allow_model_reque
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -669,7 +690,10 @@ async def test_openai_responses_model_builtin_tools_web_search(allow_model_reque
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 23, 19, 54, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0e3d55e9502941380068c4aa9a62f48195a373978ed720ac63',
             finish_reason='stop',
             run_id=IsStr(),
@@ -688,6 +712,7 @@ async def test_openai_responses_model_instructions(allow_model_requests: None, o
     [
         ModelRequest(
             parts=[UserPromptPart(content='What is the capital of France?', timestamp=IsDatetime())],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -702,7 +727,10 @@ async def test_openai_responses_model_instructions(allow_model_requests: None, o
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 4, 7, 16, 31, 57, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_67f3fdfd9fa08191a3d5825db81b8df6003bc73febb56d77',
             finish_reason='stop',
             run_id=IsStr(),
@@ -725,6 +753,7 @@ async def test_openai_responses_model_web_search_tool(allow_model_requests: None
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -766,7 +795,10 @@ async def test_openai_responses_model_web_search_tool(allow_model_requests: None
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 16, 20, 27, 26, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_028829e50fbcad090068c9c82e1e0081958ddc581008b39428',
             finish_reason='stop',
             run_id=IsStr(),
@@ -785,6 +817,7 @@ async def test_openai_responses_model_web_search_tool(allow_model_requests: None
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -826,7 +859,10 @@ async def test_openai_responses_model_web_search_tool(allow_model_requests: None
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 16, 20, 27, 39, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_028829e50fbcad090068c9c83b9fb88195b6b84a32e1fc83c0',
             finish_reason='stop',
             run_id=IsStr(),
@@ -855,6 +891,7 @@ async def test_openai_responses_model_web_search_tool_with_user_location(
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -896,7 +933,10 @@ async def test_openai_responses_model_web_search_tool_with_user_location(
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 23, 21, 23, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0b385a0fdc82fd920068c4aaf3ced88197a88711e356b032c4',
             finish_reason='stop',
             run_id=IsStr(),
@@ -926,6 +966,7 @@ async def test_openai_responses_model_web_search_tool_with_invalid_region(
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -967,7 +1008,10 @@ async def test_openai_responses_model_web_search_tool_with_invalid_region(
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 23, 21, 47, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0b4f29854724a3120068c4ab0b660081919707b95b47552782',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1004,6 +1048,7 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -1051,7 +1096,10 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 16, 21, 13, 32, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_00a60507bf41223d0068c9d2fbf93481a0ba2a7796ae2cab4c',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1223,6 +1271,7 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -1270,7 +1319,10 @@ async def test_openai_responses_model_web_search_tool_stream(allow_model_request
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 16, 21, 13, 57, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_00a60507bf41223d0068c9d31574d881a090c232646860a771',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1363,6 +1415,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1378,7 +1431,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 0, 40, 43, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68477f0b40a8819cb8d55594bc2c232a001fd29e2d5573f7',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1392,6 +1448,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1407,7 +1464,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 0, 40, 44, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68477f0bfda8819ea65458cd7cc389b801dc81d4bc91f560',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1421,6 +1481,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
     ]
@@ -1452,6 +1513,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1467,7 +1529,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 0, 40, 45, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68477f0d9494819ea4f123bba707c9ee0356a60c98816d6a',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1481,6 +1546,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1494,7 +1560,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 0, 40, 46, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68477f0e2b28819d9c828ef4ee526d6a03434b607c02582d',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1531,6 +1600,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1546,7 +1616,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 0, 40, 47, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68477f0f220081a1a621d6bcdc7f31a50b8591d9001d2329',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1560,6 +1633,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1573,7 +1647,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 0, 40, 47, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68477f0fde708192989000a62809c6e5020197534e39cc1f',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1612,6 +1689,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1627,7 +1705,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 0, 40, 48, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68477f10f2d081a39b3438f413b3bafc0dd57d732903c563',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1641,6 +1722,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1654,7 +1736,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 0, 40, 49, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68477f119830819da162aa6e10552035061ad97e2eef7871',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1689,6 +1774,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1704,7 +1790,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 13, 11, 46, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68482f12d63881a1830201ed101ecfbf02f8ef7f2fb42b50',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1718,6 +1807,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1731,7 +1821,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 13, 11, 55, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68482f1b556081918d64c9088a470bf0044fdb7d019d4115',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1770,6 +1863,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1785,7 +1879,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 13, 11, 57, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68482f1d38e081a1ac828acda978aa6b08e79646fe74d5ee',
             finish_reason='stop',
             run_id=IsStr(),
@@ -1799,6 +1896,7 @@ async def get_user_country() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -1812,7 +1910,10 @@ async def get_user_country() -> str:
             model_name='gpt-4o-2024-08-06',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 6, 10, 13, 12, 8, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68482f28c1b081a1ae73cbbee012ee4906b4ab2d00d03024',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2015,6 +2116,7 @@ async def test_openai_responses_usage_without_tokens_details(allow_model_request
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2023,6 +2125,7 @@ async def test_openai_responses_usage_without_tokens_details(allow_model_request
             model_name='gpt-4o-123',
             timestamp=IsDatetime(),
             provider_name='openai',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -2044,6 +2147,7 @@ async def test_openai_responses_model_thinking_part(allow_model_requests: None,
     [
         ModelRequest(
             parts=[UserPromptPart(content='How do I cross the street?', timestamp=IsDatetime())],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2068,7 +2172,10 @@ async def test_openai_responses_model_thinking_part(allow_model_requests: None,
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 14, 22, 8, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c42c902794819cb9335264c342f65407460311b0c8d3de',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2089,6 +2196,7 @@ async def test_openai_responses_model_thinking_part(allow_model_requests: None,
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2112,7 +2220,10 @@ async def test_openai_responses_model_thinking_part(allow_model_requests: None,
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 14, 22, 43, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c42cb3d520819c9d28b07036e9059507460311b0c8d3de',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2141,6 +2252,7 @@ async def test_openai_responses_thinking_part_from_other_model(
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2191,6 +2303,7 @@ async def test_openai_responses_thinking_part_from_other_model(
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2212,7 +2325,10 @@ async def test_openai_responses_thinking_part_from_other_model(
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 14, 23, 30, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c42ce277ac8193ba08881bcefabaf70ad492c7955fc6fc',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2244,6 +2360,7 @@ async def test_openai_responses_thinking_part_iter(allow_model_requests: None, o
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2275,7 +2392,10 @@ async def test_openai_responses_thinking_part_iter(allow_model_requests: None, o
             model_name='o3-mini-2025-01-31',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 14, 24, 15, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c42d0fb418819dbfa579f69406b49508fbf9b1584184ff',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2321,6 +2441,7 @@ def update_plan(plan: str) -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions="You are a helpful assistant that uses planning. You MUST use the update_plan tool and continually update it as you make progress against the user's prompt",
             run_id=IsStr(),
         ),
@@ -2347,7 +2468,10 @@ def update_plan(plan: str) -> str:
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 14, 24, 40, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c42d28772c819684459966ee2201ed0e8bc41441c948f6',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2361,6 +2485,7 @@ def update_plan(plan: str) -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions="You are a helpful assistant that uses planning. You MUST use the update_plan tool and continually update it as you make progress against the user's prompt",
             run_id=IsStr(),
         ),
@@ -2372,7 +2497,10 @@ def update_plan(plan: str) -> str:
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 14, 25, 3, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c42d3fd6a08196bce23d6be960ff8a0e8bc41441c948f6',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2413,6 +2541,7 @@ async def test_openai_responses_thinking_without_summary(allow_model_requests: N
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2423,6 +2552,7 @@ async def test_openai_responses_thinking_without_summary(allow_model_requests: N
             model_name='gpt-4o-123',
             timestamp=IsDatetime(),
             provider_name='openai',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -2486,6 +2616,7 @@ async def test_openai_responses_thinking_with_multiple_summaries(allow_model_req
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2499,6 +2630,7 @@ async def test_openai_responses_thinking_with_multiple_summaries(allow_model_req
             model_name='gpt-4o-123',
             timestamp=IsDatetime(),
             provider_name='openai',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -2551,6 +2683,7 @@ async def test_openai_responses_thinking_with_modified_history(allow_model_reque
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2567,7 +2700,10 @@ async def test_openai_responses_thinking_with_modified_history(allow_model_reque
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 14, 27, 43, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c42ddf9bbc8194aa7b97304dd909cb0202c9ad459e0d23',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2604,6 +2740,7 @@ async def test_openai_responses_thinking_with_modified_history(allow_model_reque
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2620,7 +2757,10 @@ async def test_openai_responses_thinking_with_modified_history(allow_model_reque
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 12, 14, 27, 48, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c42de4afcc819f995a1c59fe87c9d5051f82c608a83beb',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2652,6 +2792,7 @@ async def test_openai_responses_thinking_with_code_execution_tool(allow_model_re
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2704,7 +2845,10 @@ async def test_openai_responses_thinking_with_code_execution_tool(allow_model_re
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 19, 20, 17, 21, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68cdba511c7081a389e67b16621029c609b7445677780c8f',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2723,6 +2867,7 @@ async def test_openai_responses_thinking_with_code_execution_tool(allow_model_re
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2739,7 +2884,10 @@ async def test_openai_responses_thinking_with_code_execution_tool(allow_model_re
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 19, 20, 17, 46, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68cdba6a610481a3b4533f345bea8a7b09b7445677780c8f',
             finish_reason='stop',
             run_id=IsStr(),
@@ -2777,6 +2925,7 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream(
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -2837,7 +2986,10 @@ async def test_openai_responses_thinking_with_code_execution_tool_stream(
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 11, 22, 43, 36, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68c35098e6fc819e80fb94b25b7d031b0f2d670b80edc507',
             finish_reason='stop',
             run_id=IsStr(),
@@ -3597,6 +3749,7 @@ def get_meaning_of_life() -> int:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -3612,7 +3765,10 @@ def get_meaning_of_life() -> int:
             model_name='gpt-4.1-2025-04-14',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 18, 18, 29, 57, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68cc4fa5603481958e2143685133fe530548824120ffcf74',
             finish_reason='stop',
             run_id=IsStr(),
@@ -3626,6 +3782,7 @@ def get_meaning_of_life() -> int:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -3643,7 +3800,10 @@ def get_meaning_of_life() -> int:
             model_name='gpt-4.1-2025-04-14',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 18, 18, 29, 58, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68cc4fa6a8a881a187b0fe1603057bff0307c6d4d2ee5985',
             finish_reason='stop',
             run_id=IsStr(),
@@ -3706,6 +3866,7 @@ async def test_openai_responses_code_execution_return_image(allow_model_requests
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -3775,7 +3936,10 @@ async def test_openai_responses_code_execution_return_image(allow_model_requests
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 19, 20, 56, 34, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68cdc382bc98819083a5b47ec92e077b0187028ba77f15f7',
             finish_reason='stop',
             run_id=IsStr(),
@@ -3801,6 +3965,7 @@ async def test_openai_responses_code_execution_return_image(allow_model_requests
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -3934,7 +4099,10 @@ async def test_openai_responses_code_execution_return_image(allow_model_requests
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 19, 20, 57, 1, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68cdc39da72481909e0512fef9d646240187028ba77f15f7',
             finish_reason='stop',
             run_id=IsStr(),
@@ -3978,6 +4146,7 @@ async def test_openai_responses_code_execution_return_image_stream(allow_model_r
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -4019,7 +4188,10 @@ async def test_openai_responses_code_execution_return_image_stream(allow_model_r
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 20, 47, 35, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_06c1a26fd89d07f20068dd9367869c819788cb28e6f19eff9b',
             finish_reason='stop',
             run_id=IsStr(),
@@ -5466,6 +5638,7 @@ async def test_openai_responses_image_generation(allow_model_requests: None, ope
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -5513,7 +5686,10 @@ async def test_openai_responses_image_generation(allow_model_requests: None, ope
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 19, 20, 57, 58, tzinfo=timezone.utc),
+            },
             provider_response_id=IsStr(),
             finish_reason='stop',
             run_id=IsStr(),
@@ -5538,6 +5714,7 @@ async def test_openai_responses_image_generation(allow_model_requests: None, ope
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -5585,7 +5762,10 @@ async def test_openai_responses_image_generation(allow_model_requests: None, ope
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 19, 20, 59, 28, tzinfo=timezone.utc),
+            },
             provider_response_id=IsStr(),
             finish_reason='stop',
             run_id=IsStr(),
@@ -5632,6 +5812,7 @@ async def test_openai_responses_image_generation_stream(allow_model_requests: No
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -5677,7 +5858,10 @@ async def test_openai_responses_image_generation_stream(allow_model_requests: No
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 20, 40, 2, tzinfo=timezone.utc),
+            },
             provider_response_id=IsStr(),
             finish_reason='stop',
             run_id=IsStr(),
@@ -5812,6 +5996,7 @@ async def test_openai_responses_image_generation_tool_without_image_output(
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -5856,7 +6041,10 @@ async def test_openai_responses_image_generation_tool_without_image_output(
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 19, 23, 49, 51, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68cdec1f3290819f99d9caba8703b251079003437d26d0c0',
             finish_reason='stop',
             run_id=IsStr(),
@@ -5869,6 +6057,7 @@ async def test_openai_responses_image_generation_tool_without_image_output(
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -5913,7 +6102,10 @@ async def test_openai_responses_image_generation_tool_without_image_output(
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 9, 19, 23, 50, 57, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_68cdec61d0a0819fac14ed057a9946a1079003437d26d0c0',
             finish_reason='stop',
             run_id=IsStr(),
@@ -5975,6 +6167,7 @@ class Animal(BaseModel):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6017,7 +6210,10 @@ class Animal(BaseModel):
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 19, 38, 16, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0360827931d9421b0068dd8328c08c81a0ba854f245883906f',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6030,6 +6226,7 @@ class Animal(BaseModel):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6051,7 +6248,10 @@ class Animal(BaseModel):
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 19, 39, 28, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0360827931d9421b0068dd8370a70081a09d6de822ee43bbc4',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6065,6 +6265,7 @@ class Animal(BaseModel):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
     ]
@@ -6090,6 +6291,7 @@ class Animal(BaseModel):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6135,7 +6337,10 @@ class Animal(BaseModel):
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 19, 41, 59, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_09b7ce6df817433c0068dd8407c37881a0ad817ef3cc3a3600',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6163,6 +6368,7 @@ class Animal(BaseModel):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6208,7 +6414,10 @@ class Animal(BaseModel):
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 19, 55, 9, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0d14a5e3c26c21180068dd871d439081908dc36e63fab0cedf',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6242,6 +6451,7 @@ async def get_animal() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6263,7 +6473,10 @@ async def get_animal() -> str:
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 20, 2, 36, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0481074da98340df0068dd88dceb1481918b1d167d99bc51cd',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6277,6 +6490,7 @@ async def get_animal() -> str:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6313,7 +6527,10 @@ async def get_animal() -> str:
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 20, 2, 56, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0481074da98340df0068dd88f0ba04819185a168065ef28040',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6344,6 +6561,7 @@ async def test_openai_responses_multiple_images(allow_model_requests: None, open
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6416,7 +6634,10 @@ async def test_openai_responses_multiple_images(allow_model_requests: None, open
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 19, 28, 22, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0b6169df6e16e9690068dd80d64aec81919c65f238307673bb',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6447,6 +6668,7 @@ async def test_openai_responses_image_generation_jpeg(allow_model_requests: None
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6489,7 +6711,10 @@ async def test_openai_responses_image_generation_jpeg(allow_model_requests: None
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 1, 21, 28, 13, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_08acbdf1ae54befc0068dd9ced226c8197a2e974b29c565407',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6550,6 +6775,7 @@ class CityLocation(BaseModel):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -6571,7 +6797,10 @@ class CityLocation(BaseModel):
             model_name='gpt-5-2025-08-07',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 13, 11, 30, 47, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_001fd29e2d5573f70068ece2e6dfbc819c96557f0de72802be',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6585,6 +6814,7 @@ class CityLocation(BaseModel):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
     ]
@@ -6620,6 +6850,7 @@ async def test_openai_responses_model_mcp_server_tool(allow_model_requests: None
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -6745,7 +6976,10 @@ async def test_openai_responses_model_mcp_server_tool(allow_model_requests: None
             model_name='o4-mini-2025-04-16',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 23, 23, 42, 57, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0083938b3a28070e0068fabd81970881a0a1195f2cab45bd04',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6764,6 +6998,7 @@ async def test_openai_responses_model_mcp_server_tool(allow_model_requests: None
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -6792,7 +7027,10 @@ async def test_openai_responses_model_mcp_server_tool(allow_model_requests: None
             model_name='o4-mini-2025-04-16',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 23, 23, 43, 25, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0083938b3a28070e0068fabd9d414881a089cf24784f80e021',
             finish_reason='stop',
             run_id=IsStr(),
@@ -6841,6 +7079,7 @@ async def test_openai_responses_model_mcp_server_tool_stream(allow_model_request
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -7012,7 +7251,10 @@ async def test_openai_responses_model_mcp_server_tool_stream(allow_model_request
             model_name='o4-mini-2025-04-16',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 23, 21, 40, 50, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_00b9cc7a23d047270068faa0e25934819f9c3bfdec80065bc4',
             finish_reason='stop',
             run_id=IsStr(),
@@ -7231,6 +7473,7 @@ async def test_openai_responses_model_mcp_server_tool_with_connector(allow_model
             parts=[
                 UserPromptPart(content='What do I have on my Google Calendar for today?', timestamp=IsDatetime())
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         ),
@@ -7390,7 +7633,10 @@ async def test_openai_responses_model_mcp_server_tool_with_connector(allow_model
             model_name='o4-mini-2025-04-16',
             timestamp=IsDatetime(),
             provider_name='openai',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 10, 23, 21, 41, 13, tzinfo=timezone.utc),
+            },
             provider_response_id='resp_0558010cf1416a490068faa0f945bc81a0b6a6dfb7391030d5',
             finish_reason='stop',
             run_id=IsStr(),
@@ -7484,6 +7730,7 @@ async def test_openai_responses_raw_cot_only(allow_model_requests: None):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7499,6 +7746,7 @@ async def test_openai_responses_raw_cot_only(allow_model_requests: None):
             model_name='gpt-4o-123',
             timestamp=IsDatetime(),
             provider_name='openai',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -7547,6 +7795,7 @@ async def test_openai_responses_raw_cot_with_summary(allow_model_requests: None)
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7563,6 +7812,7 @@ async def test_openai_responses_raw_cot_with_summary(allow_model_requests: None)
             model_name='gpt-4o-123',
             timestamp=IsDatetime(),
             provider_name='openai',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -7613,6 +7863,7 @@ async def test_openai_responses_multiple_summaries(allow_model_requests: None):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7631,6 +7882,7 @@ async def test_openai_responses_multiple_summaries(allow_model_requests: None):
             model_name='gpt-4o-123',
             timestamp=IsDatetime(),
             provider_name='openai',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -7661,6 +7913,7 @@ async def test_openai_responses_raw_cot_stream_openrouter(allow_model_requests:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7684,7 +7937,10 @@ async def test_openai_responses_raw_cot_stream_openrouter(allow_model_requests:
             model_name='openai/gpt-oss-20b',
             timestamp=IsDatetime(),
             provider_name='openrouter',
-            provider_details={'finish_reason': 'completed'},
+            provider_details={
+                'finish_reason': 'completed',
+                'timestamp': datetime(2025, 11, 27, 17, 43, 31, tzinfo=timezone.utc),
+            },
             provider_response_id='gen-1764265411-Fu1iEX7h5MRWiL79lb94',
             finish_reason='stop',
             run_id=IsStr(),
@@ -7799,6 +8055,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7814,6 +8071,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
             model_name='gpt-4o-123',
             timestamp=IsDatetime(),
             provider_name='openai',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -7831,6 +8089,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
                    timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7846,6 +8105,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
             model_name='gpt-4o-123',
             timestamp=IsDatetime(),
             provider_name='openai',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -7856,6 +8116,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7873,6 +8134,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
             model_name='gpt-4o-123',
             timestamp=IsDatetime(),
             provider_name='openai',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -7890,6 +8152,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7905,6 +8168,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
             model_name='gpt-4o-123',
             timestamp=IsDatetime(),
             provider_name='openai',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -7915,6 +8179,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7932,6 +8197,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
             model_name='gpt-4o-123',
             timestamp=IsDatetime(),
             provider_name='openai',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
@@ -7942,6 +8208,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         ),
         ModelResponse(
@@ -7957,6 +8224,7 @@ async def capture_messages(*args: Any, **kwargs: Any) -> Any:
             model_name='gpt-4o-123',
             timestamp=IsDatetime(),
             provider_name='openai',
+            provider_details={'timestamp': datetime(2024, 1, 1, 0, 0, tzinfo=timezone.utc)},
             provider_response_id='123',
             run_id=IsStr(),
         ),
diff --git a/tests/models/test_openrouter.py b/tests/models/test_openrouter.py
index d439fee026..b0d8f0e504 100644
--- a/tests/models/test_openrouter.py
+++ b/tests/models/test_openrouter.py
@@ -1,3 +1,4 @@
+import datetime
 from collections.abc import Sequence
 from typing import Literal, cast
 
@@ -95,7 +96,20 @@ async def test_openrouter_stream_with_native_options(allow_model_requests: None,
 
     _ = [chunk async for chunk in stream]
 
-    assert stream.provider_details == snapshot({'finish_reason': 'completed', 'downstream_provider': 'xAI'})
+    assert stream.provider_details is not None
+    assert stream.provider_details == snapshot(
+        {
+            'timestamp': datetime.datetime(2025, 11, 2, 6, 14, 57, tzinfo=datetime.timezone.utc),
+            'finish_reason': 'completed',
+            'cost': 0.00333825,
+            'upstream_inference_cost': None,
+            'is_byok': False,
+            'downstream_provider': 'xAI',
+        }
+    )
+    # Explicitly verify native_finish_reason is 'completed' and wasn't overwritten by the
+    # final usage chunk (which has native_finish_reason: null, see cassette for details)
+    assert stream.provider_details['finish_reason'] == 'completed'
     assert stream.finish_reason == snapshot('stop')
@@ -326,6 +340,40 @@ async def test_openrouter_validate_error_response(openrouter_api_key: str) -> No
     )
 
+
+async def test_openrouter_with_provider_details_but_no_parent_details(openrouter_api_key: str) -> None:
+    from typing import Any
+
+    class TestOpenRouterModel(OpenRouterModel):
+        def _process_provider_details(self, response: ChatCompletion) -> dict[str, Any] | None:
+            from pydantic_ai.models.openrouter import (
+                _map_openrouter_provider_details,  # pyright: ignore[reportPrivateUsage]
+                _OpenRouterChatCompletion,  # pyright: ignore[reportPrivateUsage]
+            )
+
+            assert isinstance(response, _OpenRouterChatCompletion)
+            openrouter_details = _map_openrouter_provider_details(response)
+            return openrouter_details or None
+
+    provider = OpenRouterProvider(api_key=openrouter_api_key)
+    model = TestOpenRouterModel('google/gemini-2.0-flash-exp:free', provider=provider)
+
+    choice = Choice.model_construct(
+        index=0, message={'role': 'assistant', 'content': 'test'}, finish_reason='stop', native_finish_reason='stop'
+    )
+    response = ChatCompletion.model_construct(
+        id='test', choices=[choice], created=1704067200, object='chat.completion', model='test', provider='TestProvider'
+    )
+    result = model._process_response(response)  # type: ignore[reportPrivateUsage]
+
+    assert result.provider_details == snapshot(
+        {
+            'downstream_provider': 'TestProvider',
+            'finish_reason': 'stop',
+            'timestamp': datetime.datetime(2024, 1, 1, 0, 0, tzinfo=datetime.timezone.utc),
+        }
+    )
+
+
 async def test_openrouter_map_messages_reasoning(allow_model_requests: None, openrouter_api_key: str) -> None:
     provider = OpenRouterProvider(api_key=openrouter_api_key)
     model = OpenRouterModel('anthropic/claude-3.7-sonnet:thinking', provider=provider)
@@ -408,6 +456,29 @@ class FindEducationContentFilters(BaseModel):
     )
 
+
+async def test_openrouter_no_openrouter_details(openrouter_api_key: str) -> None:
+    """Test _process_provider_details when _map_openrouter_provider_details returns empty dict."""
+    from unittest.mock import patch
+
+    provider = OpenRouterProvider(api_key=openrouter_api_key)
+    model = OpenRouterModel('google/gemini-2.0-flash-exp:free', provider=provider)
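+    # Descriptive note (added by the editor, assumption-free: both facts are visible in the
+    # assertions below): created=1704067200 is the Unix timestamp for 2024-01-01T00:00:00Z,
+    # and _process_response surfaces it as provider_details['timestamp'].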
+
+    choice = Choice.model_construct(
+        index=0, message={'role': 'assistant', 'content': 'test'}, finish_reason='stop', native_finish_reason='stop'
+    )
+    response = ChatCompletion.model_construct(
+        id='test', choices=[choice], created=1704067200, object='chat.completion', model='test', provider='TestProvider'
+    )
+
+    with patch('pydantic_ai.models.openrouter._map_openrouter_provider_details', return_value={}):
+        result = model._process_response(response)  # type: ignore[reportPrivateUsage]
+
+    # With empty openrouter_details, we should still get the parent's provider_details (timestamp + finish_reason)
+    assert result.provider_details == snapshot(
+        {'finish_reason': 'stop', 'timestamp': datetime.datetime(2024, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)}
+    )
+
+
 async def test_openrouter_google_nested_schema(allow_model_requests: None, openrouter_api_key: str) -> None:
     """Test that nested schemas with $defs/$ref work correctly with OpenRouter + Gemini.
diff --git a/tests/models/test_outlines.py b/tests/models/test_outlines.py
index 4ecc3668ba..c59a736044 100644
--- a/tests/models/test_outlines.py
+++ b/tests/models/test_outlines.py
@@ -322,6 +322,7 @@ async def test_request_async(llamacpp_model: OutlinesModel) -> None:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             instructions='Answer in one word.',
             run_id=IsStr(),
         ),
@@ -338,6 +339,7 @@ async def test_request_async(llamacpp_model: OutlinesModel) -> None:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             instructions='Answer in one word.',
             run_id=IsStr(),
         ),
@@ -349,6 +351,7 @@ async def test_request_async(llamacpp_model: OutlinesModel) -> None:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             instructions='Answer in one word.',
             run_id=IsStr(),
         ),
@@ -370,6 +373,7 @@ def test_request_sync(llamacpp_model: OutlinesModel) -> None:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()),
@@ -400,6 +404,7 @@ async def test_request_async_model(mock_async_model: OutlinesModel) -> None:
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()),
@@ -435,6 +440,7 @@ def test_request_image_binary(transformers_multimodal_model: OutlinesModel, bina
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()),
@@ -466,6 +472,7 @@ def test_request_image_url(transformers_multimodal_model: OutlinesModel) -> None
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()),
@@ -522,6 +529,7 @@ class Box(BaseModel):
                     timestamp=IsDatetime(),
                 )
             ],
+            timestamp=IsDatetime(),
             run_id=IsStr(),
         ),
         ModelResponse(parts=[TextPart(content=IsStr())], timestamp=IsDatetime(), run_id=IsStr()),
@@ -540,7 +548,8 @@ def test_input_format(transformers_multimodal_model: OutlinesModel, binary_image
             SystemPromptPart(content='You are a helpful assistance'),
             UserPromptPart(content='Hello'),
             RetryPromptPart(content='Failure'),
-        ]
+        ],
+        timestamp=IsDatetime(),
     ),
     ModelResponse(
         parts=[
@@ -562,7 +571,8 @@ def test_input_format(transformers_multimodal_model: OutlinesModel, binary_image
                     AudioUrl('https://example.com/audio.mp3'),
                 ]
             )
-        ]
+        ],
+        timestamp=IsDatetime(),
     )
 ]
 with pytest.raises(
pytest.raises( @@ -573,14 +583,18 @@ def test_input_format(transformers_multimodal_model: OutlinesModel, binary_image # unsupported: tool calls tool_call_message_history: list[ModelMessage] = [ ModelResponse(parts=[ToolCallPart(tool_call_id='1', tool_name='get_location')]), - ModelRequest(parts=[ToolReturnPart(tool_name='get_location', content='London', tool_call_id='1')]), + ModelRequest( + parts=[ToolReturnPart(tool_name='get_location', content='London', tool_call_id='1')], timestamp=IsDatetime() + ), ] with pytest.raises(UserError, match='Tool calls are not supported for Outlines models yet.'): agent.run_sync('How are you doing?', message_history=tool_call_message_history) # unsupported: tool returns tool_return_message_history: list[ModelMessage] = [ - ModelRequest(parts=[ToolReturnPart(tool_name='get_location', content='London', tool_call_id='1')]) + ModelRequest( + parts=[ToolReturnPart(tool_name='get_location', content='London', tool_call_id='1')], timestamp=IsDatetime() + ) ] with pytest.raises(UserError, match='Tool calls are not supported for Outlines models yet.'): agent.run_sync('How are you doing?', message_history=tool_return_message_history) diff --git a/tests/test_a2a.py b/tests/test_a2a.py index 4e5f74f476..e19e7e5710 100644 --- a/tests/test_a2a.py +++ b/tests/test_a2a.py @@ -1,4 +1,5 @@ import uuid +from datetime import timezone import anyio import httpx @@ -21,7 +22,7 @@ from pydantic_ai.models.function import AgentInfo, FunctionModel from pydantic_ai.usage import RequestUsage -from .conftest import IsDatetime, IsStr, try_import +from .conftest import IsDatetime, IsNow, IsStr, try_import with try_import() as imports_successful: from fasta2a.client import A2AClient @@ -579,6 +580,7 @@ def track_messages(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon [ ModelRequest( parts=[UserPromptPart(content='First message', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ) ] @@ -618,6 +620,7 @@ def track_messages(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon [ ModelRequest( parts=[UserPromptPart(content='First message', timestamp=IsDatetime())], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ModelResponse( @@ -641,6 +644,7 @@ def track_messages(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon ), UserPromptPart(content='Second message', timestamp=IsDatetime()), ], + timestamp=IsNow(tz=timezone.utc), run_id=IsStr(), ), ] diff --git a/tests/test_ag_ui.py b/tests/test_ag_ui.py index 5cbf85fc69..b678029abb 100644 --- a/tests/test_ag_ui.py +++ b/tests/test_ag_ui.py @@ -6,6 +6,7 @@ import uuid from collections.abc import AsyncIterator, MutableMapping from dataclasses import dataclass +from datetime import timezone from http import HTTPStatus from typing import Any @@ -52,7 +53,7 @@ from pydantic_ai.output import OutputDataT from pydantic_ai.tools import AgentDepsT, ToolDefinition -from .conftest import IsDatetime, IsSameStr, try_import +from .conftest import IsDatetime, IsNow, IsSameStr, try_import with try_import() as imports_successful: from ag_ui.core import ( @@ -1525,7 +1526,8 @@ async def test_messages() -> None: content='User message', timestamp=IsDatetime(), ), - ] + ], + timestamp=IsNow(tz=timezone.utc), ), ModelResponse( parts=[ @@ -1566,7 +1568,8 @@ async def test_messages() -> None: content='User message', timestamp=IsDatetime(), ), - ] + ], + timestamp=IsNow(tz=timezone.utc), ), ModelResponse( parts=[TextPart(content='Assistant message')], diff --git a/tests/test_agent.py b/tests/test_agent.py 
index 9c44f4adfb..9f4ab56e80 100644
--- a/tests/test_agent.py
+++ b/tests/test_agent.py
@@ -183,6 +183,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -208,6 +209,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -226,6 +228,7 @@ def return_model(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse
                         timestamp=IsNow(tz=timezone.utc),
                    )
                ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -316,6 +319,7 @@ def validate_output(ctx: RunContext[None], o: Foo) -> Foo:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -334,6 +338,7 @@ def validate_output(ctx: RunContext[None], o: Foo) -> Foo:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -352,6 +357,7 @@ def validate_output(ctx: RunContext[None], o: Foo) -> Foo:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -460,6 +466,7 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -477,6 +484,7 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         tool_call_id=IsStr(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -497,6 +505,7 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -509,6 +518,7 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                 tool_name='final_result', content='foobar', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc)
             )
         ],
+        timestamp=IsNow(tz=timezone.utc),
         run_id=IsStr(),
     )
 )
@@ -522,6 +532,7 @@ def return_tuple(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                 timestamp=IsNow(tz=timezone.utc),
             )
         ],
+        timestamp=IsNow(tz=timezone.utc),
         run_id=IsStr(),
     )
 )
@@ -1048,6 +1059,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1072,6 +1084,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1096,6 +1109,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -1135,6 +1149,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1152,6 +1167,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1327,6 +1343,7 @@ def say_world(_: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1383,6 +1400,7 @@ def call_handoff_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelRes
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1407,6 +1425,7 @@ def call_handoff_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelRes
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -1421,6 +1440,7 @@ def call_handoff_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelRes
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1445,6 +1465,7 @@ def call_handoff_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelRes
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -1785,6 +1806,7 @@ class CityLocation(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1821,6 +1843,7 @@ class Foo(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1886,6 +1909,7 @@ def return_foo_bar(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1933,6 +1957,7 @@ class CityLocation(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1957,6 +1982,7 @@ class CityLocation(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2014,6 +2040,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2031,6 +2058,7 @@ def call_tool(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2061,6 +2089,7 @@ async def ret_a(x: str) -> str:
                     SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)),
                     UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2076,6 +2105,7 @@ async def ret_a(x: str) -> str:
                         tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc)
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2097,6 +2127,7 @@ async def ret_a(x: str) -> str:
                     SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)),
                     UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2112,6 +2143,7 @@ async def ret_a(x: str) -> str:
                         tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc)
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2123,6 +2155,7 @@ async def ret_a(x: str) -> str:
             ),
             ModelRequest(
                 parts=[UserPromptPart(content='Hello again', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2162,6 +2195,7 @@ async def ret_a(x: str) -> str:
                     SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)),
                     UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2177,6 +2211,7 @@ async def ret_a(x: str) -> str:
                         tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc)
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2188,6 +2223,7 @@ async def ret_a(x: str) -> str:
             ),
             ModelRequest(
                 parts=[UserPromptPart(content='Hello again', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2226,6 +2262,7 @@ async def ret_a(x: str) -> str:
                     SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)),
                     UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2241,6 +2278,7 @@ async def ret_a(x: str) -> str:
                         tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc)
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2265,6 +2303,7 @@ async def ret_a(x: str) -> str:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -2278,6 +2317,7 @@ async def ret_a(x: str) -> str:
                     SystemPromptPart(content='Foobar', timestamp=IsNow(tz=timezone.utc)),
                     UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2293,6 +2333,7 @@ async def ret_a(x: str) -> str:
                         tool_name='ret_a', content='a-apple', tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc)
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2311,6 +2352,7 @@ async def ret_a(x: str) -> str:
                         timestamp=IsNow(tz=timezone.utc),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             # second call, notice no repeated system prompt
@@ -2318,6 +2360,7 @@ async def ret_a(x: str) -> str:
                 parts=[
                     UserPromptPart(content='Hello again', timestamp=IsNow(tz=timezone.utc)),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2336,6 +2379,7 @@ async def ret_a(x: str) -> str:
                         timestamp=IsNow(tz=timezone.utc),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -2403,6 +2447,7 @@ async def instructions(ctx: RunContext) -> str:
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 instructions='New instructions',
                 run_id=IsStr(),
             ),
@@ -2461,6 +2506,7 @@ def test_tool() -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2558,6 +2604,7 @@ async def test_message_history_ending_on_model_response_with_instructions():
         [
             ModelRequest(
                 parts=[],
+                timestamp=IsNow(tz=timezone.utc),
                 instructions="""\
 Summarize this conversation to include all important facts about the user and what their interactions were about.\
@@ -2595,6 +2642,7 @@ def llm(messages: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2606,6 +2654,7 @@ def llm(messages: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
             ),
             ModelRequest(
                 parts=[],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2638,6 +2687,7 @@ def llm(messages: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2649,6 +2699,7 @@ def llm(messages: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
             ),
             ModelRequest(
                 parts=[],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2686,6 +2737,7 @@ def empty(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2704,6 +2756,7 @@ def empty(_: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2732,6 +2785,7 @@ def empty(m: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2750,6 +2804,7 @@ def empty(m: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -3107,6 +3162,7 @@ def deferred_tool(x: int) -> int:  # pragma: no cover
         [
             ModelRequest(
                 parts=[UserPromptPart(content='test exhaustive strategy', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -3163,6 +3219,7 @@ def deferred_tool(x: int) -> int:  # pragma: no cover
                         timestamp=IsNow(tz=timezone.utc),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -3219,6 +3276,7 @@ def deferred_tool(x: int) -> int:  # pragma: no cover
                         content='test early strategy with final result in middle', timestamp=IsNow(tz=timezone.utc)
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -3271,6 +3329,7 @@ def deferred_tool(x: int) -> int:  # pragma: no cover
                         timestamp=IsNow(tz=timezone.utc),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -3389,6 +3448,7 @@ async def get_location(loc_name: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -3410,6 +3470,7 @@ async def get_location(loc_name: str) -> str:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -3438,6 +3499,7 @@ def test_nested_capture_run_messages() -> None:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -3465,6 +3527,7 @@ def test_double_capture_run_messages() -> None:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -3516,6 +3579,7 @@ async def func() -> str:
                     ),
                     UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt'),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
                 kind='request',
             ),
@@ -3546,6 +3610,7 @@ async def func() -> str:
                     ),
                     UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt'),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
                 kind='request',
             ),
@@ -3559,6 +3624,7 @@ async def func() -> str:
             ),
             ModelRequest(
                 parts=[UserPromptPart(content='World', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt')],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
                 kind='request',
             ),
@@ -3605,6 +3671,7 @@ async def func():
                     ),
                     UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt'),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
                 kind='request',
             ),
@@ -3636,6 +3703,7 @@ async def func():
                     ),
                     UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt'),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
                 kind='request',
             ),
@@ -3649,6 +3717,7 @@ async def func():
             ),
             ModelRequest(
                 parts=[UserPromptPart(content='World', timestamp=IsNow(tz=timezone.utc), part_kind='user-prompt')],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
                 kind='request',
             ),
@@ -3704,6 +3773,7 @@ async def foobar(x: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='foobar', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -3722,6 +3792,7 @@ async def foobar(x: str) -> str:
                         timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -3798,6 +3869,7 @@ def test_binary_content_serializable():
                     'part_kind': 'user-prompt',
                 }
             ],
+            'timestamp': IsStr(),
             'instructions': None,
             'kind': 'request',
             'run_id': IsStr(),
@@ -3860,6 +3932,7 @@ def test_image_url_serializable_missing_media_type():
                     'part_kind': 'user-prompt',
                 }
            ],
+            'timestamp': IsStr(),
             'instructions': None,
             'kind': 'request',
             'run_id': IsStr(),
@@ -3929,6 +4002,7 @@ def test_image_url_serializable():
                     'part_kind': 'user-prompt',
                 }
             ],
+            'timestamp': IsStr(),
             'instructions': None,
             'kind': 'request',
             'run_id': IsStr(),
@@ -4051,6 +4125,7 @@ def get_image() -> BinaryContent:
                     timestamp=IsNow(tz=timezone.utc),
                 ),
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         )
     )
@@ -4103,6 +4178,7 @@ def get_files():
                     timestamp=IsNow(tz=timezone.utc),
                 ),
             ],
+            timestamp=IsNow(tz=timezone.utc),
             run_id=IsStr(),
         )
     )
@@ -4122,6 +4198,7 @@ def system_prompt() -> str:
                 SystemPromptPart(content='A system prompt!', timestamp=IsNow(tz=timezone.utc)),
                 UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='An instructions!',
             run_id=IsStr(),
         )
@@ -4146,6 +4223,7 @@ def empty_instructions() -> str:
                 SystemPromptPart(content='A system prompt!', timestamp=IsNow(tz=timezone.utc)),
                 UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='An instructions!',
             run_id=IsStr(),
         )
@@ -4161,6 +4239,7 @@ def test_instructions_both_instructions_and_system_prompt_are_set():
                 SystemPromptPart(content='A system prompt!', timestamp=IsNow(tz=timezone.utc)),
                 UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc)),
             ],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='An instructions!',
             run_id=IsStr(),
         )
@@ -4178,6 +4257,7 @@ def instructions() -> str:
     assert result.all_messages()[0] == snapshot(
         ModelRequest(
             parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         )
@@ -4195,6 +4275,7 @@ def instructions_2() -> str:
     assert result.all_messages()[0] == snapshot(
         ModelRequest(
             parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+            timestamp=IsNow(tz=timezone.utc),
             instructions='You are a helpful assistant.',
             run_id=IsStr(),
         )
@@ -4214,6 +4295,7 @@ def test_instructions_with_message_history():
             ),
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 instructions='You are a helpful assistant.',
                 run_id=IsStr(),
             ),
@@ -4242,6 +4324,7 @@ def empty_instructions() -> str:
     assert result.all_messages()[0] == snapshot(
         ModelRequest(
             parts=[UserPromptPart(content='Hello', timestamp=IsDatetime())],
+            timestamp=IsNow(tz=timezone.utc),
             instructions="""\
 You are a helpful assistant.
@@ -4258,6 +4341,7 @@ def test_instructions_during_run():
     assert result.all_messages()[0] == snapshot(
         ModelRequest(
             parts=[UserPromptPart(content='Hello', timestamp=IsDatetime())],
+            timestamp=IsNow(tz=timezone.utc),
             instructions="""\
 You are a helpful assistant. Your task is to greet people.\
@@ -4270,6 +4354,7 @@ def test_instructions_during_run():
     assert result2.all_messages()[0] == snapshot(
         ModelRequest(
             parts=[UserPromptPart(content='Hello again!', timestamp=IsDatetime())],
+            timestamp=IsNow(tz=timezone.utc),
             instructions="""\
 You are a helpful assistant.\
 """,
@@ -4298,7 +4383,12 @@ class Output(BaseModel):
     assert messages == snapshot(
         [
-            ModelRequest(parts=[], instructions='Agent 2 instructions', run_id=IsStr()),
+            ModelRequest(
+                parts=[],
+                timestamp=IsNow(tz=timezone.utc),
+                instructions='Agent 2 instructions',
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[ToolCallPart(tool_name='final_result', args={'text': 'a'}, tool_call_id=IsStr())],
                 usage=RequestUsage(input_tokens=51, output_tokens=9),
@@ -4315,6 +4405,7 @@ class Output(BaseModel):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -4347,6 +4438,7 @@ def my_tool(x: int) -> int:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -4365,6 +4457,7 @@ def my_tool(x: int) -> int:
                         tool_name='my_tool', content=2, tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc)
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -4383,6 +4476,7 @@ def my_tool(x: int) -> int:
                         tool_name='my_tool', content=4, tool_call_id=IsStr(), timestamp=IsNow(tz=timezone.utc)
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -4454,6 +4548,7 @@ def foo_tool(foo: Foo) -> int:
                     'part_kind': 'retry-prompt',
                 }
             ],
+            'timestamp': IsStr(),
             'instructions': None,
             'kind': 'request',
             'run_id': IsStr(),
@@ -4526,6 +4621,7 @@ def analyze_data() -> ToolReturn:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Please analyze the data', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -4560,6 +4656,7 @@ def analyze_data() -> ToolReturn:
                         timestamp=IsNow(tz=timezone.utc),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -4607,6 +4704,7 @@ def analyze_data() -> ToolReturn:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Please analyze the data', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -4633,6 +4731,7 @@ def analyze_data() -> ToolReturn:
                         timestamp=IsNow(tz=timezone.utc),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -4911,6 +5010,7 @@ def respond(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -4929,6 +5029,7 @@ def respond(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -4947,6 +5048,7 @@ def respond(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -5002,6 +5104,7 @@ async def only_if_plan_presented(
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -5026,6 +5129,7 @@ async def only_if_plan_presented(
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -5050,6 +5154,7 @@ async def only_if_plan_presented(
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -5314,6 +5419,7 @@ def model_function(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -5331,6 +5437,7 @@ def model_function(messages: list[ModelMessage], info: AgentInfo) -> ModelRespon
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -5388,6 +5495,7 @@ def create_file(path: str, content: str) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -5418,6 +5526,7 @@ def create_file(path: str, content: str) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -5446,6 +5555,7 @@ def create_file(path: str, content: str) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -5476,6 +5586,7 @@ def create_file(path: str, content: str) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelRequest(
@@ -5493,6 +5604,7 @@ def create_file(path: str, content: str) -> str:
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -5523,6 +5635,7 @@ def create_file(path: str, content: str) -> str:
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -5671,6 +5784,7 @@ def update_file(ctx: RunContext, path: str, content: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Update .env file', timestamp=IsDatetime())],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -5708,6 +5822,7 @@ def update_file(ctx: RunContext, path: str, content: str) -> str:
                     ),
                     UserPromptPart(content='continue with the operation', timestamp=IsDatetime()),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -5777,6 +5892,7 @@ def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -5798,6 +5914,7 @@ def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -5831,6 +5948,7 @@ def llm(messages: list[ModelMessage], info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -6101,6 +6219,7 @@ def roll_dice() -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -6119,6 +6238,7 @@ def roll_dice() -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -6143,6 +6263,7 @@ def roll_dice() -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -6159,6 +6280,7 @@ def roll_dice() -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -6177,6 +6299,7 @@ def roll_dice() -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -6201,6 +6324,7 @@ def roll_dice() -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
@@ -6306,6 +6430,7 @@ def llm(messages: list[ModelMessage], _info: AgentInfo) -> ModelResponse:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
diff --git a/tests/test_dbos.py b/tests/test_dbos.py
index 1d3b9991db..dc25ec950f 100644
--- a/tests/test_dbos.py
+++ b/tests/test_dbos.py
@@ -8,7 +8,7 @@
 from collections.abc import AsyncIterable, AsyncIterator, Generator, Iterator
 from contextlib import contextmanager
 from dataclasses import dataclass, field
-from datetime import datetime
+from datetime import datetime, timezone
 from typing import Any, Literal
 
 import pytest
@@ -44,7 +44,7 @@
 from pydantic_ai.run import AgentRunResult
 from pydantic_ai.usage import RequestUsage
 
-from .conftest import IsDatetime, IsStr
+from .conftest import IsDatetime, IsNow, IsStr
 
 try:
     import importlib.metadata
@@ -1404,6 +1404,7 @@ async def hitl_main_loop(prompt: str) -> AgentRunResult[str | DeferredToolReques
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 instructions='Just call tools without asking for confirmation.',
                 run_id=IsStr(),
             ),
@@ -1433,7 +1434,10 @@ async def hitl_main_loop(prompt: str) -> AgentRunResult[str | DeferredToolReques
                 model_name=IsStr(),
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id=IsStr(),
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1453,6 +1457,7 @@ async def hitl_main_loop(prompt: str) -> AgentRunResult[str | DeferredToolReques
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 instructions='Just call tools without asking for confirmation.',
                 run_id=IsStr(),
             ),
@@ -1473,7 +1478,10 @@ async def hitl_main_loop(prompt: str) -> AgentRunResult[str | DeferredToolReques
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id=IsStr(),
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1535,6 +1543,7 @@ def hitl_main_loop_sync(prompt: str) -> AgentRunResult[str | DeferredToolRequest
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 instructions='Just call tools without asking for confirmation.',
                 run_id=IsStr(),
             ),
@@ -1564,7 +1573,10 @@ def hitl_main_loop_sync(prompt: str) -> AgentRunResult[str | DeferredToolRequest
                 model_name=IsStr(),
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id=IsStr(),
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1584,6 +1596,7 @@ def hitl_main_loop_sync(prompt: str) -> AgentRunResult[str | DeferredToolRequest
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 instructions='Just call tools without asking for confirmation.',
                 run_id=IsStr(),
             ),
@@ -1604,7 +1617,10 @@ def hitl_main_loop_sync(prompt: str) -> AgentRunResult[str | DeferredToolRequest
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id=IsStr(),
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1642,6 +1658,7 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1665,7 +1682,10 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id=IsStr(),
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1679,6 +1699,7 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1702,7 +1723,10 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id=IsStr(),
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1716,6 +1740,7 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1733,7 +1758,10 @@ async def test_dbos_agent_with_model_retry(allow_model_requests: None, dbos: DBO
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id=IsStr(),
                 finish_reason='stop',
                 run_id=IsStr(),
diff --git a/tests/test_history_processor.py b/tests/test_history_processor.py
index 89e487dc8c..09dc6f4689 100644
--- a/tests/test_history_processor.py
+++ b/tests/test_history_processor.py
@@ -66,6 +66,7 @@ def no_op_history_processor(messages: list[ModelMessage]) -> list[ModelMessage]:
             ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()),
             ModelRequest(
                 parts=[UserPromptPart(content='New question', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -77,6 +78,7 @@ def no_op_history_processor(messages: list[ModelMessage]) -> list[ModelMessage]:
             ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()),
             ModelRequest(
                 parts=[UserPromptPart(content='New question', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -124,7 +126,8 @@ def process_previous_answers(messages: list[ModelMessage]) -> list[ModelMessage]
                         content='Processed answer',
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                timestamp=IsDatetime(),
             )
         ]
     )
@@ -133,9 +136,13 @@ def process_previous_answers(messages: list[ModelMessage]) -> list[ModelMessage]
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Question 3', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
-            ModelRequest(parts=[SystemPromptPart(content='Processed answer', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[SystemPromptPart(content='Processed answer', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='Provider response')],
                 usage=RequestUsage(input_tokens=54, output_tokens=2),
@@ -183,7 +190,8 @@ def process_previous_answers(messages: list[ModelMessage]) -> list[ModelMessage]
                         content='Processed answer',
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                timestamp=IsDatetime(),
             )
         ]
     )
@@ -192,9 +200,13 @@ def process_previous_answers(messages: list[ModelMessage]) -> list[ModelMessage]
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Question 3', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
-            ModelRequest(parts=[SystemPromptPart(content='Processed answer', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[SystemPromptPart(content='Processed answer', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='hello')],
                 usage=RequestUsage(input_tokens=50, output_tokens=1),
@@ -238,7 +250,8 @@ def capture_messages_processor(messages: list[ModelMessage]) -> list[ModelMessag
                         content='New question',
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                timestamp=IsDatetime(),
             )
         ]
     )
@@ -248,6 +261,7 @@ def capture_messages_processor(messages: list[ModelMessage]) -> list[ModelMessag
             ModelRequest(parts=[UserPromptPart(content='Previous question', timestamp=IsDatetime())]),
             ModelRequest(
                 parts=[UserPromptPart(content='New question', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -306,7 +320,10 @@ def second_processor(messages: list[ModelMessage]) -> list[ModelMessage]:
         [
             ModelRequest(parts=[UserPromptPart(content='[SECOND] [FIRST] Question', timestamp=IsDatetime())]),
             ModelResponse(parts=[TextPart(content='Answer')], timestamp=IsDatetime()),
-            ModelRequest(parts=[UserPromptPart(content='[SECOND] [FIRST] New question', timestamp=IsDatetime())]),
+            ModelRequest(
+                parts=[UserPromptPart(content='[SECOND] [FIRST] New question', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
+            ),
         ]
     )
     assert captured_messages == result.all_messages()
@@ -330,7 +347,8 @@ def second_processor(messages: list[ModelMessage]) -> list[ModelMessage]:
                         content='[SECOND] [FIRST] New question',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                timestamp=IsDatetime(),
             ),
             ModelResponse(
                 parts=[TextPart(content='Provider response')],
@@ -371,7 +389,8 @@ async def async_processor(messages: list[ModelMessage]) -> list[ModelMessage]:
                         content='Question 2',
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                timestamp=IsDatetime(),
             )
         ]
     )
@@ -393,6 +412,7 @@ async def async_processor(messages: list[ModelMessage]) -> list[ModelMessage]:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -441,7 +461,8 @@ async def async_processor(messages: list[ModelMessage]) -> list[ModelMessage]:
                         content='Question 2',
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                timestamp=IsDatetime(),
             )
         ]
     )
@@ -463,6 +484,7 @@ async def async_processor(messages: list[ModelMessage]) -> list[ModelMessage]:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -509,7 +531,8 @@ def context_processor(ctx: RunContext[str], messages: list[ModelMessage]) -> lis
                         content='PREFIX: test',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                timestamp=IsDatetime(),
             )
         ]
     )
@@ -522,7 +545,8 @@ def context_processor(ctx: RunContext[str], messages: list[ModelMessage]) -> lis
                         content='PREFIX: test',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                timestamp=IsDatetime(),
             ),
             ModelResponse(
                 parts=[TextPart(content='Provider response')],
@@ -564,6 +588,7 @@ async def async_context_processor(ctx: RunContext[Any], messages: list[ModelMess
                        timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             )
         ]
@@ -578,6 +603,7 @@ async def async_context_processor(ctx: RunContext[Any], messages: list[ModelMess
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -642,7 +668,8 @@ class Deps:
                         content='TEST: Question 2',
                         timestamp=IsDatetime(),
                     ),
-                ]
+                ],
+                timestamp=IsDatetime(),
             )
         ]
     )
@@ -663,7 +690,8 @@ class Deps:
                         content='TEST: Question 2',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                timestamp=IsDatetime(),
             ),
             ModelResponse(
                 parts=[TextPart(content='Provider response')],
@@ -702,7 +730,8 @@ def return_new_history(messages: list[ModelMessage]) -> list[ModelMessage]:
                         content='Modified message',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                timestamp=IsDatetime(),
             )
         ]
     )
@@ -715,7 +744,8 @@ def return_new_history(messages: list[ModelMessage]) -> list[ModelMessage]:
                         content='Modified message',
                         timestamp=IsDatetime(),
                     )
-                ]
+                ],
+                timestamp=IsDatetime(),
             ),
             ModelResponse(
                 parts=[TextPart(content='Provider response')],
@@ -774,6 +804,7 @@ def __call__(self, messages: list[ModelMessage]) -> list[ModelMessage]:
             ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()),
             ModelRequest(
                 parts=[UserPromptPart(content='New question', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -785,6 +816,7 @@ def __call__(self, messages: list[ModelMessage]) -> list[ModelMessage]:
             ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()),
             ModelRequest(
                 parts=[UserPromptPart(content='New question', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -822,6 +854,7 @@ def __call__(self, _: RunContext, messages: list[ModelMessage]) -> list[ModelMes
             ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()),
             ModelRequest(
                 parts=[UserPromptPart(content='New question', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -833,6 +866,7 @@ def __call__(self, _: RunContext, messages: list[ModelMessage]) -> list[ModelMes
             ModelResponse(parts=[TextPart(content='Previous answer')], timestamp=IsDatetime()),
             ModelRequest(
                 parts=[UserPromptPart(content='New question', timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
diff --git a/tests/test_logfire.py b/tests/test_logfire.py
index dadb930dd0..d48eeae93b 100644
--- a/tests/test_logfire.py
+++ b/tests/test_logfire.py
@@ -22,7 +22,7 @@
 from pydantic_ai.toolsets.function import FunctionToolset
 from pydantic_ai.toolsets.wrapper import WrapperToolset
 
-from .conftest import IsStr
+from .conftest import IsDatetime, IsStr
 
 try:
     import logfire
@@ -2738,7 +2738,11 @@ def instructions(ctx: RunContext[None]):
     result = my_agent.run_sync(
         'Hello',
         message_history=[
-            ModelRequest(parts=[UserPromptPart(content='Hi')], instructions='Instructions from a previous agent run'),
+            ModelRequest(
+                parts=[UserPromptPart(content='Hi')],
+                instructions='Instructions from a previous agent run',
+                timestamp=IsDatetime(),
+            ),
             ModelResponse(parts=[TextPart(content='Hello')]),
         ],
         output_type=MyOutput,
diff --git a/tests/test_mcp.py b/tests/test_mcp.py
index d53ad47551..bdcf283632 100644
--- a/tests/test_mcp.py
+++ b/tests/test_mcp.py
@@ -225,6 +225,7 @@ async def test_agent_with_stdio_server(allow_model_requests: None, agent: Agent)
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -248,7 +249,10 @@ async def test_agent_with_stdio_server(allow_model_requests: None, agent: Agent)
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRlnvvqIPFofAtKqtQKMWZkgXhzlT',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -262,6 +266,7 @@ async def test_agent_with_stdio_server(allow_model_requests: None, agent: Agent)
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -279,7 +284,10 @@ async def test_agent_with_stdio_server(allow_model_requests: None, agent: Agent)
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRlnyjUo5wlyqvdNdM5I8vIWjo1qF',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -394,6 +402,7 @@ async def test_tool_returning_str(allow_model_requests: None, agent: Agent):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -417,7 +426,10 @@ async def test_tool_returning_str(allow_model_requests: None, agent: Agent):
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRlo3e1Ud2lnvkddMilmwC7LAemiy',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -431,6 +443,7 @@ async def test_tool_returning_str(allow_model_requests: None, agent: Agent):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -452,7 +465,10 @@ async def test_tool_returning_str(allow_model_requests: None, agent: Agent):
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRlo41LxqBYgGKWgGrQn67fQacOLp',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -474,6 +490,7 @@ async def test_tool_returning_text_resource(allow_model_requests: None, agent: A
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -497,7 +514,10 @@ async def test_tool_returning_text_resource(allow_model_requests: None, agent: A
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRmhyweJVYonarb7s9ckIMSHf2vHo',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -511,6 +531,7 @@ async def test_tool_returning_text_resource(allow_model_requests: None, agent: A
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -528,7 +549,10 @@ async def test_tool_returning_text_resource(allow_model_requests: None, agent: A
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRmhzqXFObpYwSzREMpJvX9kbDikR',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -550,6 +574,7 @@ async def test_tool_returning_text_resource_link(allow_model_requests: None, age
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -573,7 +598,10 @@ async def test_tool_returning_text_resource_link(allow_model_requests: None, age
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BwdHSFe0EykAOpf0LWZzsWAodIQzb',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -587,6 +615,7 @@ async def test_tool_returning_text_resource_link(allow_model_requests: None, age
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -604,7 +633,10 @@ async def test_tool_returning_text_resource_link(allow_model_requests: None, age
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BwdHTIlBZWzXJPBR8VTOdC4O57ZQA',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -628,6 +660,7 @@ async def test_tool_returning_image_resource(allow_model_requests: None, agent:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -651,7 +684,10 @@ async def test_tool_returning_image_resource(allow_model_requests: None, agent:
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRlo7KYJVXuNZ5lLLdYcKZDsX2CHb',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -666,6 +702,7 @@ async def test_tool_returning_image_resource(allow_model_requests: None, agent:
                     ),
                     UserPromptPart(content=['This is file 1c8566:', image_content], timestamp=IsDatetime()),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -687,7 +724,10 @@ async def test_tool_returning_image_resource(allow_model_requests: None, agent:
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloBGHh27w3fQKwxq4fX2cPuZJa9',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -713,6 +753,7 @@ async def test_tool_returning_image_resource_link(
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -736,7 +777,10 @@ async def test_tool_returning_image_resource_link(
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BwdHygYePH1mZgHo2Xxzib0Y7sId7',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -751,6 +795,7 @@ async def test_tool_returning_image_resource_link(
                     ),
                     UserPromptPart(content=['This is file 1c8566:', image_content], timestamp=IsDatetime()),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -772,7 +817,10 @@ async def test_tool_returning_image_resource_link(
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BwdI2D2r9dvqq3pbsA0qgwKDEdTtD',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -792,6 +840,7 @@ async def test_tool_returning_audio_resource(
         [
             ModelRequest(
                 parts=[UserPromptPart(content="What's the content of the audio resource?", timestamp=IsDatetime())],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -817,6 +866,7 @@ async def test_tool_returning_audio_resource(
                     ),
                     UserPromptPart(content=['This is file 2d36ae:', audio_content], timestamp=IsDatetime()),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -855,6 +905,7 @@ async def test_tool_returning_audio_resource_link(
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -893,6 +944,7 @@ async def test_tool_returning_audio_resource_link(
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -928,6 +980,7 @@ async def test_tool_returning_image(allow_model_requests: None, agent: Agent, im
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -951,7 +1004,10 @@ async def test_tool_returning_image(allow_model_requests: None, agent: Agent, im
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloGQJWIX0Qk7gtNzF4s2Fez0O29',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -972,6 +1028,7 @@ async def test_tool_returning_image(allow_model_requests: None, agent: Agent, im
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -989,7 +1046,10 @@ async def test_tool_returning_image(allow_model_requests: None, agent: Agent, im
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloJHR654fSD0fcvLWZxtKtn0pag',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1011,6 +1071,7 @@ async def test_tool_returning_dict(allow_model_requests: None, agent: Agent):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1028,7 +1089,10 @@ async def test_tool_returning_dict(allow_model_requests: None, agent: Agent):
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloOs7Bb2tq8wJyy9Rv7SQ7L65a7',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1042,6 +1106,7 @@ async def test_tool_returning_dict(allow_model_requests: None, agent: Agent):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1059,7 +1124,10 @@ async def test_tool_returning_dict(allow_model_requests: None, agent: Agent):
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloPczU1HSCWnreyo21DdNtdOM7L',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1081,6 +1149,7 @@ async def test_tool_returning_unstructured_dict(allow_model_requests: None, agen
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1102,7 +1171,10 @@ async def test_tool_returning_unstructured_dict(allow_model_requests: None, agen
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-CLbP82ODQMEznhobUKdq6Rjn9Aa12',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1116,6 +1188,7 @@ async def test_tool_returning_unstructured_dict(allow_model_requests: None, agen
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1133,7 +1206,10 @@ async def test_tool_returning_unstructured_dict(allow_model_requests: None, agen
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-CLbPAOYN3jPYdvYeD8JNOOXF5N554',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1157,6 +1233,7 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1180,7 +1257,10 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloSNg7aGSp1rXDkhInjMIUHKd7A',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1194,6 +1274,7 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1217,7 +1298,10 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloTvSkFeX4DZKQLqfH9KbQkWlpt',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1231,6 +1315,7 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1252,7 +1337,10 @@ async def test_tool_returning_error(allow_model_requests: None, agent: Agent):
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloU3MhnqNEqujs28a3ofRbs7VPF',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1274,6 +1362,7 @@ async def test_tool_returning_none(allow_model_requests: None, agent: Agent):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1291,7 +1380,10 @@ async def test_tool_returning_none(allow_model_requests: None, agent: Agent):
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloX2RokWc9j9PAXAuNXGR73WNqY',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1305,6 +1397,7 @@ async def test_tool_returning_none(allow_model_requests: None, agent: Agent):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1322,7 +1415,10 @@ async def test_tool_returning_none(allow_model_requests: None, agent: Agent):
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloYWGujk8yE94gfVSsM1T1Ol2Ej',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1346,6 +1442,7 @@ async def test_tool_returning_multiple_items(allow_model_requests: None, agent:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1369,7 +1466,10 @@ async def test_tool_returning_multiple_items(allow_model_requests: None, agent:
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={
+                    'finish_reason': 'tool_calls',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRlobKLgm6vf79c9O8sloZaYx3coC',
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1395,6 +1495,7 @@ async def test_tool_returning_multiple_items(allow_model_requests: None, agent:
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1416,7 +1517,10 @@ async def test_tool_returning_multiple_items(allow_model_requests: None, agent:
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={
+                    'finish_reason': 'stop',
+                    'timestamp': IsDatetime(),
+                },
                 provider_response_id='chatcmpl-BRloepWR5NJpTgSqFBGTSPeM1SWm8',
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1501,7 +1605,8 @@ def test_map_from_mcp_params_model_request():
                     content=[BinaryContent(data=b'img', media_type='image/png', identifier='978ea7')],
                     timestamp=IsNow(tz=timezone.utc),
                 ),
-            ]
+            ],
+            timestamp=IsNow(tz=timezone.utc),
         )
     ]
 )
diff --git a/tests/test_messages.py b/tests/test_messages.py
index f116502882..e922ef497a 100644
--- a/tests/test_messages.py
+++ b/tests/test_messages.py
@@ -449,7 +449,7 @@ def test_pre_usage_refactor_messages_deserializable():
                     content='What is the capital of Mexico?',
                     timestamp=IsNow(tz=timezone.utc),
                 )
-            ]
+            ],
         ),
         ModelResponse(
             parts=[TextPart(content='Mexico City.')],
diff --git a/tests/test_prefect.py b/tests/test_prefect.py
index b1c18b9803..ec27c4a86a 100644
--- a/tests/test_prefect.py
+++ b/tests/test_prefect.py
@@ -68,7 +68,7 @@
 from inline_snapshot import snapshot
 
-from .conftest import IsStr
+from .conftest import IsDatetime, IsStr
 
 pytestmark = [
     pytest.mark.anyio,
@@ -1037,7 +1037,9 @@ async def test_cache_policy_custom():
     # First set of messages
     messages1 = [
-        ModelRequest(parts=[UserPromptPart(content='What is the capital of France?', timestamp=time1)]),
+        ModelRequest(
+            parts=[UserPromptPart(content='What is the capital of France?', timestamp=time1)], timestamp=IsDatetime()
+        ),
         ModelResponse(
             parts=[TextPart(content='The capital of France is Paris.')],
             usage=RequestUsage(input_tokens=10, output_tokens=10),
@@ -1048,7 +1050,9 @@ async def test_cache_policy_custom():
     # Second set of messages - same content, different timestamps
     messages2 = [
-        ModelRequest(parts=[UserPromptPart(content='What is the capital of France?', timestamp=time2)]),
+        ModelRequest(
+            parts=[UserPromptPart(content='What is the capital of France?', timestamp=time2)], timestamp=IsDatetime()
+        ),
         ModelResponse(
             parts=[TextPart(content='The capital of France is Paris.')],
             usage=RequestUsage(input_tokens=10, output_tokens=10),
@@ -1077,7 +1081,9 @@ async def test_cache_policy_custom():
     # Also test that different content produces different hashes
     messages3 = [
-        ModelRequest(parts=[UserPromptPart(content='What is the capital of Spain?', timestamp=time1)]),
+        ModelRequest(
+            parts=[UserPromptPart(content='What is the capital of Spain?', timestamp=time1)], timestamp=IsDatetime()
+        ),
         ModelResponse(
             parts=[TextPart(content='The capital of Spain is Madrid.')],
             usage=RequestUsage(input_tokens=10, output_tokens=10),
diff --git a/tests/test_streaming.py b/tests/test_streaming.py
index 0c6a46f3c0..17ddfc6f1f 100644
--- a/tests/test_streaming.py
+++ b/tests/test_streaming.py
@@ -1,6 +1,5 @@
 from __future__ import annotations as _annotations
 
-import datetime
 import json
 import re
 from collections.abc import AsyncIterable, AsyncIterator
@@ -73,6 +72,7 @@ async def ret_a(x: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -89,6 +89,7 @@ async def ret_a(x: str) -> str:
                         tool_name='ret_a', content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -109,6 +110,7 @@ async def ret_a(x: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -125,6 +127,7 @@ async def ret_a(x: str) -> str:
                         tool_name='ret_a', content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -165,6 +168,7 @@ async def ret_a(x: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -181,6 +185,7 @@ async def ret_a(x: str) -> str:
                         tool_name='ret_a', content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -211,6 +216,7 @@ async def ret_a(x: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -227,6 +233,7 @@ async def ret_a(x: str) -> str:
                         tool_name='ret_a', content='a-apple', timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -589,6 +596,7 @@ async def ret_a(x: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -607,6 +615,7 @@ async def ret_a(x: str) -> str:
                         tool_call_id=IsStr(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -616,6 +625,7 @@ async def ret_a(x: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -634,6 +644,7 @@ async def ret_a(x: str) -> str:
                         tool_call_id=IsStr(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -658,6 +669,7 @@ async def ret_a(x: str) -> str:
                         tool_call_id=IsStr(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -689,6 +701,7 @@ async def stream_structured_function(
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -698,7 +711,11 @@ async def stream_structured_function(
                 timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
-            ModelRequest(parts=[], run_id=IsStr()),
+            ModelRequest(
+                parts=[],
+                timestamp=IsDatetime(),
+                run_id=IsStr(),
+            ),
             ModelResponse(
                 parts=[TextPart(content='ok here is text')],
                 usage=RequestUsage(input_tokens=50, output_tokens=4),
@@ -733,6 +750,7 @@ async def ret_a(x: str) -> str:  # pragma: no cover
         [
             ModelRequest(
                 parts=[UserPromptPart(content='hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -789,6 +807,7 @@ def another_tool(y: int) -> int:  # pragma: no cover
         [
             ModelRequest(
                 parts=[UserPromptPart(content='test early strategy', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -823,6 +842,7 @@ def another_tool(y: int) -> int:  # pragma: no cover
                         tool_call_id=IsStr(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -849,6 +869,7 @@ async def sf(_: list[ModelMessage], info: AgentInfo) -> AsyncIterator[str | Delt
         [
             ModelRequest(
                 parts=[UserPromptPart(content='test multiple final results', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -876,6 +897,7 @@ async def sf(_: list[ModelMessage], info: AgentInfo) -> AsyncIterator[str | Delt
                         tool_call_id=IsStr(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -918,6 +940,7 @@ def another_tool(y: int) -> int:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='test exhaustive strategy', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -960,6 +983,7 @@ def another_tool(y: int) -> int:
                         timestamp=IsNow(tz=timezone.utc),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -1006,10 +1030,11 @@ def another_tool(y: int) -> int:  # pragma: no cover
                 parts=[
                     UserPromptPart(
                         content='test early strategy with final result in middle',
-                        timestamp=IsNow(tz=datetime.timezone.utc),
+                        timestamp=IsNow(tz=timezone.utc),
                         part_kind='user-prompt',
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
                 kind='request',
             ),
@@ -1042,7 +1067,7 @@ def another_tool(y: int) -> int:  # pragma: no cover
                 ],
                 usage=RequestUsage(input_tokens=50, output_tokens=14),
                 model_name='function::sf',
-                timestamp=IsNow(tz=datetime.timezone.utc),
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
                 kind='response',
             ),
@@ -1052,31 +1077,32 @@ def another_tool(y: int) -> int:  # pragma: no cover
                         tool_name='final_result',
                         content='Final result processed.',
                         tool_call_id=IsStr(),
-                        timestamp=IsNow(tz=datetime.timezone.utc),
+                        timestamp=IsNow(tz=timezone.utc),
                         part_kind='tool-return',
                     ),
                     ToolReturnPart(
                         tool_name='regular_tool',
                         content='Tool not executed - a final result was already processed.',
                         tool_call_id=IsStr(),
-                        timestamp=IsNow(tz=datetime.timezone.utc),
+                        timestamp=IsNow(tz=timezone.utc),
                         part_kind='tool-return',
                     ),
                     ToolReturnPart(
                         tool_name='another_tool',
                         content='Tool not executed - a final result was already processed.',
                         tool_call_id=IsStr(),
-                        timestamp=IsNow(tz=datetime.timezone.utc),
+                        timestamp=IsNow(tz=timezone.utc),
                         part_kind='tool-return',
                     ),
                     RetryPromptPart(
                         content="Unknown tool name: 'unknown_tool'. Available tools: 'final_result', 'regular_tool', 'another_tool'",
                         tool_name='unknown_tool',
                         tool_call_id=IsStr(),
-                        timestamp=IsNow(tz=datetime.timezone.utc),
+                        timestamp=IsNow(tz=timezone.utc),
                         part_kind='retry-prompt',
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
                 kind='request',
             ),
@@ -1139,10 +1165,11 @@ def regular_tool(x: int) -> int:  # pragma: no cover
                 parts=[
                     UserPromptPart(
                         content='test early strategy with external tool call',
-                        timestamp=IsNow(tz=datetime.timezone.utc),
+                        timestamp=IsNow(tz=timezone.utc),
                         part_kind='user-prompt',
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
                 kind='request',
             ),
@@ -1162,7 +1189,7 @@ def regular_tool(x: int) -> int:  # pragma: no cover
                 ],
                 usage=RequestUsage(input_tokens=50, output_tokens=7),
                 model_name='function::sf',
-                timestamp=IsNow(tz=datetime.timezone.utc),
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
                 kind='response',
             ),
@@ -1172,15 +1199,16 @@ def regular_tool(x: int) -> int:  # pragma: no cover
                         tool_name='final_result',
                         content='Output tool not used - a final result was already processed.',
                         tool_call_id=IsStr(),
-                        timestamp=IsNow(tz=datetime.timezone.utc),
+                        timestamp=IsNow(tz=timezone.utc),
                     ),
                     ToolReturnPart(
                         tool_name='regular_tool',
                         content='Tool not executed - a final result was already processed.',
                         tool_call_id=IsStr(),
-                        timestamp=IsNow(tz=datetime.timezone.utc),
+                        timestamp=IsNow(tz=timezone.utc),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
                 kind='request',
             ),
@@ -1228,10 +1256,11 @@ def regular_tool(x: int) -> int:
                 parts=[
                     UserPromptPart(
                         content='test early strategy with external tool call',
-                        timestamp=IsNow(tz=datetime.timezone.utc),
+                        timestamp=IsNow(tz=timezone.utc),
                         part_kind='user-prompt',
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
                 kind='request',
             ),
@@ -1246,7 +1275,7 @@ def regular_tool(x: int) -> int:
                 ],
                 usage=RequestUsage(input_tokens=50, output_tokens=3),
                 model_name='function::sf',
-                timestamp=IsNow(tz=datetime.timezone.utc),
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
                 kind='response',
             ),
@@ -1256,9 +1285,10 @@ def regular_tool(x: int) -> int:
                         tool_name='regular_tool',
                         content=1,
                         tool_call_id=IsStr(),
-                        timestamp=IsNow(tz=datetime.timezone.utc),
+                        timestamp=IsNow(tz=timezone.utc),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
                 kind='request',
             ),
@@ -1292,6 +1322,7 @@ def regular_tool(x: int) -> int:
                         content='test early strategy with regular tool calls', timestamp=IsNow(tz=timezone.utc)
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1308,6 +1339,7 @@ def regular_tool(x: int) -> int:
                         tool_name='regular_tool', content=0, timestamp=IsNow(tz=timezone.utc), tool_call_id=IsStr()
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1327,6 +1359,7 @@ def regular_tool(x: int) -> int:
                         tool_call_id=IsStr(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -1774,6 +1807,7 @@ def my_tool(ctx: RunContext[None], x: int) -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1792,6 +1826,7 @@ def my_tool(ctx: RunContext[None], x: int) -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
diff --git a/tests/test_temporal.py b/tests/test_temporal.py
index 98039d9078..91697f5c4b 100644
--- a/tests/test_temporal.py
+++ b/tests/test_temporal.py
@@ -1827,6 +1827,9 @@ async def test_temporal_agent_with_hitl_tool(allow_model_requests: None, client:
                         timestamp=IsDatetime(),
                     )
                 ],
+                # NOTE: in other tests we check timestamp=IsNow(tz=timezone.utc),
+                # but temporal tests fail when we use IsNow
+                timestamp=IsDatetime(),
                 instructions='Just call tools without asking for confirmation.',
                 run_id=IsStr(),
             ),
@@ -1856,7 +1859,7 @@ async def test_temporal_agent_with_hitl_tool(allow_model_requests: None, client:
                 model_name=IsStr(),
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={'finish_reason': 'tool_calls', 'timestamp': '2025-08-28T22:11:03Z'},
                 provider_response_id=IsStr(),
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1876,6 +1879,7 @@ async def test_temporal_agent_with_hitl_tool(allow_model_requests: None, client:
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 instructions='Just call tools without asking for confirmation.',
                 run_id=IsStr(),
             ),
@@ -1898,7 +1902,7 @@ async def test_temporal_agent_with_hitl_tool(allow_model_requests: None, client:
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={'finish_reason': 'stop', 'timestamp': '2025-08-28T22:11:06Z'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
                 run_id=IsStr(),
@@ -1952,6 +1956,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1975,7 +1980,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={'finish_reason': 'tool_calls', 'timestamp': '2025-08-28T23:19:50Z'},
                 provider_response_id=IsStr(),
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -1989,6 +1994,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2012,7 +2018,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'tool_calls'},
+                provider_details={'finish_reason': 'tool_calls', 'timestamp': '2025-08-28T23:19:51Z'},
                 provider_response_id=IsStr(),
                 finish_reason='tool_call',
                 run_id=IsStr(),
@@ -2026,6 +2032,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2043,7 +2050,7 @@ async def test_temporal_agent_with_model_retry(allow_model_requests: None, clien
                 model_name='gpt-4o-2024-08-06',
                 timestamp=IsDatetime(),
                 provider_name='openai',
-                provider_details={'finish_reason': 'stop'},
+                provider_details={'finish_reason': 'stop', 'timestamp': '2025-08-28T23:19:52Z'},
                 provider_response_id=IsStr(),
                 finish_reason='stop',
                 run_id=IsStr(),
diff --git a/tests/test_tools.py b/tests/test_tools.py
index bcdf537994..8e11d30d8b 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -1364,6 +1364,7 @@ def my_tool(ctx: RunContext[None], x: int) -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1382,6 +1383,7 @@ def my_tool(ctx: RunContext[None], x: int) -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1755,6 +1757,7 @@ def buy(fruit: str):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1809,6 +1812,7 @@ def buy(fruit: str):
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -1848,6 +1852,7 @@ def buy(fruit: str):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1902,6 +1907,7 @@ def buy(fruit: str):
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelRequest(
@@ -1930,6 +1936,7 @@ def buy(fruit: str):
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1970,6 +1977,7 @@ def buy(fruit: str):
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -1991,6 +1999,7 @@ def buy(fruit: str):
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2067,7 +2076,8 @@ def buy(fruit: str):
                     content='I bought a banana',
                     timestamp=IsDatetime(),
                 ),
-            ]
+            ],
+            timestamp=IsDatetime(),
         ),
     ]
 )
@@ -2148,6 +2158,7 @@ def bar(x: int) -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2170,6 +2181,7 @@ def bar(x: int) -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
         ]
@@ -2201,6 +2213,7 @@ def bar(x: int) -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2223,6 +2236,7 @@ def bar(x: int) -> int:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelRequest(
@@ -2240,6 +2254,7 @@ def bar(x: int) -> int:
                         timestamp=IsDatetime(),
                     ),
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2387,6 +2402,7 @@ def always_fail(ctx: RunContext[None]) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2405,6 +2421,7 @@ def always_fail(ctx: RunContext[None]) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2423,6 +2440,7 @@ def always_fail(ctx: RunContext[None]) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -2441,6 +2459,7 @@ def always_fail(ctx: RunContext[None]) -> str:
                         timestamp=IsDatetime(),
                     )
                 ],
+                timestamp=IsDatetime(),
                 run_id=IsStr(),
             ),
             ModelResponse(
diff --git a/tests/test_usage_limits.py b/tests/test_usage_limits.py
index ac17fd0be5..5cb4d529be 100644
--- a/tests/test_usage_limits.py
+++ b/tests/test_usage_limits.py
@@ -100,6 +100,7 @@ async def ret_a(x: str) -> str:
         [
             ModelRequest(
                 parts=[UserPromptPart(content='Hello', timestamp=IsNow(tz=timezone.utc))],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
             ModelResponse(
@@ -125,6 +126,7 @@ async def ret_a(x: str) -> str:
                         tool_call_id=IsStr(),
                     )
                 ],
+                timestamp=IsNow(tz=timezone.utc),
                 run_id=IsStr(),
             ),
         ]
diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py
index 917bb51912..8d9b4ab759 100644
--- a/tests/test_vercel_ai.py
+++ b/tests/test_vercel_ai.py
@@ -2,6 +2,7 @@
 
 import json
 from collections.abc import AsyncIterator, MutableMapping
+from datetime import timezone
 from typing import Any, cast
 
 import pytest
@@ -59,7 +60,7 @@
 )
 from pydantic_ai.ui.vercel_ai.response_types import BaseChunk, DataChunk
 
-from .conftest import IsDatetime, IsSameStr, IsStr, try_import
+from .conftest import IsDatetime, IsNow, IsSameStr, IsStr, try_import
 
 with try_import() as starlette_import_successful:
     from starlette.requests import Request
@@ -184,7 +185,8 @@ async def test_run(allow_model_requests: None, openai_api_key: str):
                     """,
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            timestamp=IsNow(tz=timezone.utc),
         ),
         ModelResponse(
             parts=[
@@ -216,7 +218,8 @@ async def test_run(allow_model_requests: None, openai_api_key: str):
                     content='Give me the ToCs',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            timestamp=IsNow(tz=timezone.utc),
         ),
         ModelResponse(
             parts=[
@@ -237,7 +240,8 @@ async def test_run(allow_model_requests: None, openai_api_key: str):
                     tool_call_id='toolu_01XX3rjFfG77h3KCbVHoYJMQ',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            timestamp=IsNow(tz=timezone.utc),
         ),
         ModelResponse(
             parts=[
@@ -257,7 +261,8 @@ async def test_run(allow_model_requests: None, openai_api_key: str):
                     tool_call_id='toolu_01W2yGpGQcMx7pXV2zZ4sz9g',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            timestamp=IsNow(tz=timezone.utc),
         ),
         ModelResponse(
             parts=[
@@ -273,7 +278,8 @@ async def test_run(allow_model_requests: None, openai_api_key: str):
                     content='How do I get FastAPI instrumentation to include the HTTP request and response',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            timestamp=IsNow(tz=timezone.utc),
         ),
     ]
 )
@@ -1832,7 +1838,8 @@ async def test_adapter_load_messages():
                     ],
                     timestamp=IsDatetime(),
                 ),
-            ]
+            ],
+            timestamp=IsNow(tz=timezone.utc),
         ),
         ModelResponse(
             parts=[
@@ -1848,7 +1855,8 @@ async def test_adapter_load_messages():
                     content='Give me the ToCs',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            timestamp=IsNow(tz=timezone.utc),
         ),
         ModelResponse(
             parts=[
@@ -1869,7 +1877,8 @@ async def test_adapter_load_messages():
                     tool_call_id='toolu_01XX3rjFfG77h3KCbVHoYJMQ',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            timestamp=IsNow(tz=timezone.utc),
         ),
         ModelResponse(
             parts=[
@@ -1889,7 +1898,8 @@ async def test_adapter_load_messages():
                    tool_call_id='toolu_01XX3rjFfG77h3KCbVHoY',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            timestamp=IsNow(tz=timezone.utc),
         ),
         ModelResponse(
             parts=[
@@ -1909,7 +1919,8 @@ async def test_adapter_load_messages():
                     tool_call_id='toolu_01W2yGpGQcMx7pXV2zZ4sz9g',
                     timestamp=IsDatetime(),
                 )
-            ]
+            ],
+            timestamp=IsNow(tz=timezone.utc),
         ),
         ModelResponse(
             parts=[
@@ -2505,8 +2516,8 @@ def sync_timestamps(original: list[ModelRequest | ModelResponse], new: list[Mode
             for orig_part, new_part in zip(orig_msg.parts, new_msg.parts):
                 if hasattr(orig_part, 'timestamp') and hasattr(new_part, 'timestamp'):
                     new_part.timestamp = orig_part.timestamp  # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType]
-            if hasattr(orig_msg, 'timestamp') and hasattr(new_msg, 'timestamp'):
-                new_msg.timestamp = orig_msg.timestamp  # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType]
+            if hasattr(orig_msg, 'timestamp') and hasattr(new_msg, 'timestamp'):  # pragma: no branch
+                new_msg.timestamp = orig_msg.timestamp  # pyright: ignore[reportAttributeAccessIssue]
 
     # Load back to Pydantic AI format
     reloaded_messages = VercelAIAdapter.load_messages(ui_messages)
@@ -2515,6 +2526,44 @@ def sync_timestamps(original: list[ModelRequest | ModelResponse], new: list[Mode
     assert reloaded_messages == original_messages
 
 
+async def test_adapter_dump_load_roundtrip_without_timestamps():
+    """Test that dump_messages and load_messages work when messages don't have timestamps."""
+    original_messages = [
+        ModelRequest(
+            parts=[
+                UserPromptPart(content='User message'),
+            ]
+        ),
+        ModelResponse(
+            parts=[
+                TextPart(content='Response text'),
+            ]
+        ),
+    ]
+
+    for msg in original_messages:
+        delattr(msg, 'timestamp')
+
+    ui_messages = VercelAIAdapter.dump_messages(original_messages)
+    reloaded_messages = VercelAIAdapter.load_messages(ui_messages)
+
+    def sync_timestamps(original: list[ModelRequest | ModelResponse], new: list[ModelRequest | ModelResponse]) -> None:
+        for orig_msg, new_msg in zip(original, new):
+            for orig_part, new_part in zip(orig_msg.parts, new_msg.parts):
+                if hasattr(orig_part, 'timestamp') and hasattr(new_part, 'timestamp'):
+                    new_part.timestamp = orig_part.timestamp  # pyright: ignore[reportAttributeAccessIssue, reportUnknownMemberType]
+            if hasattr(orig_msg, 'timestamp') and hasattr(new_msg, 'timestamp'):
+                new_msg.timestamp = orig_msg.timestamp  # pyright: ignore[reportAttributeAccessIssue]
+
+    sync_timestamps(original_messages, reloaded_messages)
+
+    for msg in reloaded_messages:
+        if hasattr(msg, 'timestamp'):  # pragma: no branch
+            delattr(msg, 'timestamp')
+
+    assert len(reloaded_messages) == len(original_messages)
+
+
 async def test_adapter_dump_messages_text_before_thinking():
     """Test dumping messages where text precedes a thinking part."""
     messages = [
@@ -2744,7 +2793,7 @@ async def test_adapter_dump_messages_thinking_with_metadata():
 
     # Sync timestamps for comparison (ModelResponse always has timestamp)
     for orig_msg, new_msg in zip(original_messages, reloaded_messages):
-        new_msg.timestamp = orig_msg.timestamp  # pyright: ignore[reportAttributeAccessIssue]
+        new_msg.timestamp = orig_msg.timestamp
 
     assert reloaded_messages == original_messages