diff --git a/pydantic_ai_slim/pydantic_ai/ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/_adapter.py
index 970f06e6ef..f0bcde498b 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/_adapter.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/_adapter.py
@@ -143,6 +143,12 @@ def load_messages(cls, messages: Sequence[MessageT]) -> list[ModelMessage]:
         """Transform protocol-specific messages into Pydantic AI messages."""
         raise NotImplementedError
 
+    @classmethod
+    @abstractmethod
+    def dump_messages(cls, messages: Sequence[ModelMessage]) -> list[MessageT]:
+        """Transform Pydantic AI messages into protocol-specific messages."""
+        raise NotImplementedError
+
     @abstractmethod
     def build_event_stream(self) -> UIEventStream[RunInputT, EventT, AgentDepsT, OutputDataT]:
         """Build a protocol-specific event stream transformer."""
diff --git a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
index 5d45f50a7b..64bf5d8d0f 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/ag_ui/_adapter.py
@@ -109,6 +109,11 @@ def state(self) -> dict[str, Any] | None:
         """Frontend state from the AG-UI run input."""
         return self.run_input.state
 
+    @classmethod
+    def dump_messages(cls, messages: Sequence[ModelMessage]) -> list[Message]:
+        """Transform Pydantic AI messages into AG-UI messages."""
+        raise NotImplementedError('TODO: implement dump_messages method')
+
     @classmethod
     def load_messages(cls, messages: Sequence[Message]) -> list[ModelMessage]:
         """Transform AG-UI messages into Pydantic AI messages."""
diff --git a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
index 7eee52c419..9d799df64e 100644
--- a/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
+++ b/pydantic_ai_slim/pydantic_ai/ui/vercel_ai/_adapter.py
@@ -2,7 +2,9 @@
 
 from __future__ import annotations
 
-from collections.abc import Sequence
+import json
+import uuid
+from collections.abc import Callable, Sequence
 from dataclasses import dataclass
 from functools import cached_property
 from typing import TYPE_CHECKING
@@ -12,6 +14,7 @@
 
 from ...messages import (
     AudioUrl,
+    BaseToolCallPart,
     BinaryContent,
     BuiltinToolCallPart,
     BuiltinToolReturnPart,
@@ -19,6 +22,8 @@
     FilePart,
     ImageUrl,
     ModelMessage,
+    ModelRequest,
+    ModelResponse,
     RetryPromptPart,
     SystemPromptPart,
     TextPart,
@@ -35,6 +40,9 @@
 from ._event_stream import VercelAIEventStream
 from .request_types import (
     DataUIPart,
+    DynamicToolInputAvailablePart,
+    DynamicToolOutputAvailablePart,
+    DynamicToolOutputErrorPart,
     DynamicToolUIPart,
     FileUIPart,
     ReasoningUIPart,
@@ -43,10 +51,12 @@
     SourceUrlUIPart,
     StepStartUIPart,
     TextUIPart,
+    ToolInputAvailablePart,
     ToolOutputAvailablePart,
     ToolOutputErrorPart,
     ToolUIPart,
     UIMessage,
+    UIMessagePart,
 )
 from .response_types import BaseChunk
 
@@ -57,6 +67,7 @@
 __all__ = ['VercelAIAdapter']
 
 request_data_ta: TypeAdapter[RequestData] = TypeAdapter(RequestData)
+BUILTIN_TOOL_CALL_ID_PREFIX = 'pyd_ai_builtin'
 
 
 @dataclass
@@ -141,8 +152,15 @@ def load_messages(cls, messages: Sequence[UIMessage]) -> list[ModelMessage]:  #
                 builtin_tool = part.provider_executed
                 tool_call_id = part.tool_call_id
+                args = part.input
+                if isinstance(args, str):
+                    try:
+                        args = json.loads(args)
+                    except json.JSONDecodeError:
+                        pass  # not valid JSON; keep the raw input string as-is
+
                 if builtin_tool:
                     call_part = BuiltinToolCallPart(tool_name=tool_name, tool_call_id=tool_call_id, args=args)
                     builder.add(call_part)
@@ -197,3 +215,226 @@ def load_messages(cls, messages: 
Sequence[UIMessage]) -> list[ModelMessage]:  #
                 assert_never(msg.role)
 
         return builder.messages
+
+    @classmethod
+    def dump_messages(  # noqa: C901
+        cls,
+        messages: Sequence[ModelMessage],
+        *,
+        _id_generator: Callable[[], str] | None = None,
+    ) -> list[UIMessage]:
+        """Transform Pydantic AI messages into Vercel AI messages.
+
+        Args:
+            messages: A sequence of ModelMessage objects to convert.
+            _id_generator: Optional ID generator function for testing. If not provided, uses uuid.uuid4().
+
+        Returns:
+            A list of UIMessage objects in Vercel AI format.
+        """
+
+        def _message_id_generator() -> str:
+            """Generate a message ID."""
+            return _id_generator() if _id_generator is not None else str(uuid.uuid4())
+
+        tool_returns: dict[str, ToolReturnPart | BuiltinToolReturnPart] = {}
+        tool_errors: dict[str, RetryPromptPart] = {}
+
+        for msg in messages:
+            if isinstance(msg, ModelRequest):
+                for part in msg.parts:
+                    if isinstance(part, ToolReturnPart | BuiltinToolReturnPart):
+                        tool_returns[part.tool_call_id] = part
+                    elif isinstance(part, RetryPromptPart) and part.tool_name is not None:
+                        tool_errors[part.tool_call_id] = part
+
+        result: list[UIMessage] = []
+
+        for msg in messages:
+            if isinstance(msg, ModelRequest):
+                system_parts: list[SystemPromptPart] = []
+                user_parts: list[UserPromptPart | ToolReturnPart | BuiltinToolReturnPart | RetryPromptPart] = []
+
+                for part in msg.parts:
+                    if isinstance(part, SystemPromptPart):
+                        system_parts.append(part)
+                    elif isinstance(  # pragma: no branch - All ModelRequest parts are covered
+                        part, UserPromptPart | ToolReturnPart | BuiltinToolReturnPart | RetryPromptPart
+                    ):
+                        user_parts.append(part)
+
+                if system_parts:
+                    system_ui_parts: list[UIMessagePart] = [
+                        TextUIPart(text=part.content, state='done') for part in system_parts
+                    ]
+                    result.append(UIMessage(id=_message_id_generator(), role='system', parts=system_ui_parts))
+
+                # Note: Tool returns and retry prompts don't create user message parts.
+                # They are only used to set the state of tool calls in assistant messages.
+                if user_parts:  # pragma: no branch - A ModelRequest with no user-visible parts is not tested
+                    user_ui_parts: list[UIMessagePart] = []
+                    for part in user_parts:
+                        if isinstance(part, UserPromptPart):
+                            user_ui_parts.extend(_convert_user_prompt_part(part))
+                        elif isinstance(part, ToolReturnPart | BuiltinToolReturnPart | RetryPromptPart):
+                            # Tool returns/errors don't create separate UI parts;
+                            # they're merged into the tool call in the assistant message.
+                            pass
+
+                    if user_ui_parts:
+                        result.append(UIMessage(id=_message_id_generator(), role='user', parts=user_ui_parts))
+
+            elif isinstance(  # pragma: no branch - All message types are covered (no tests for empty ModelResponse)
+                msg, ModelResponse
+            ):
+                ui_parts: list[UIMessagePart] = []
+                text_parts: list[str] = []
+                had_interruption = False
+
+                # For builtin tools, returns can be in the same ModelResponse as calls.
+                # Build a local mapping for this message.
+                local_builtin_returns: dict[str, BuiltinToolReturnPart] = {}
+                for part in msg.parts:
+                    if isinstance(part, BuiltinToolReturnPart):
+                        local_builtin_returns[part.tool_call_id] = part
+
+                for part in msg.parts:
+                    if isinstance(part, BuiltinToolReturnPart):
+                        # Skip builtin tool returns - they're handled by the tool call logic
+                        continue
+                    elif isinstance(part, TextPart):
+                        if had_interruption:
+                            text_parts.append('\n\n' + part.content)  # first text after an interruption
+                            had_interruption = False
+                        else:
+                            text_parts.append(part.content)
+                    elif isinstance(part, ThinkingPart):
+                        
if text_parts: + ui_parts.append(TextUIPart(text=''.join(text_parts), state='done')) + text_parts = [] + had_interruption = False + ui_parts.append(ReasoningUIPart(text=part.content, state='done')) + elif isinstance(part, FilePart): + if text_parts: + ui_parts.append(TextUIPart(text=''.join(text_parts), state='done')) + text_parts = [] + had_interruption = False + ui_parts.append( + FileUIPart( + url=part.content.data_uri, + media_type=part.content.media_type, + ) + ) + elif isinstance(part, BaseToolCallPart): # pragma: no branch - All assistant part types are covered + if text_parts: + ui_parts.append(TextUIPart(text=''.join(text_parts), state='done')) + text_parts = [] + + # Mark that we had an interruption for next text part + had_interruption = True + + if isinstance(part, BuiltinToolCallPart): + prefixed_id = _make_builtin_tool_call_id(part.provider_name, part.tool_call_id) + # Check local returns first (same message), then global returns (from ModelRequest) + builtin_return = local_builtin_returns.get(part.tool_call_id) or ( + tool_returns.get(part.tool_call_id) + if isinstance(tool_returns.get(part.tool_call_id), BuiltinToolReturnPart) + else None + ) + + if builtin_return: + content = builtin_return.model_response_str() + call_provider_metadata = ( + {'pydantic_ai': {'provider_name': part.provider_name}} + if part.provider_name + else None + ) + ui_parts.append( + ToolOutputAvailablePart( + type=f'tool-{part.tool_name}', + tool_call_id=prefixed_id, + input=part.args_as_json_str(), + output=content, + state='output-available', + provider_executed=True, + call_provider_metadata=call_provider_metadata, + ) + ) + else: # pragma: no cover - Builtin tool call without a return is not tested + ui_parts.append( + ToolInputAvailablePart( + type=f'tool-{part.tool_name}', + tool_call_id=prefixed_id, + input=part.args_as_json_str(), + state='input-available', + provider_executed=True, + ) + ) + else: + tool_return = tool_returns.get(part.tool_call_id) + tool_error = tool_errors.get(part.tool_call_id) + + if tool_return and isinstance(tool_return, ToolReturnPart): + content = tool_return.model_response_str() + ui_parts.append( + DynamicToolOutputAvailablePart( + tool_name=part.tool_name, + tool_call_id=part.tool_call_id, + input=part.args_as_json_str(), + output=content, + state='output-available', + ) + ) + elif tool_error: + error_text = tool_error.model_response() + ui_parts.append( + DynamicToolOutputErrorPart( + tool_name=part.tool_name, + tool_call_id=part.tool_call_id, + input=part.args_as_json_str(), + error_text=error_text, + state='output-error', + ) + ) + else: + ui_parts.append( + DynamicToolInputAvailablePart( + tool_name=part.tool_name, + tool_call_id=part.tool_call_id, + input=part.args_as_json_str(), + state='input-available', + ) + ) + + if text_parts: + ui_parts.append(TextUIPart(text=''.join(text_parts), state='done')) + + if ui_parts: # pragma: no branch - An empty ModelResponse is not tested + result.append(UIMessage(id=_message_id_generator(), role='assistant', parts=ui_parts)) + + return result + + +def _make_builtin_tool_call_id(provider_name: str | None, tool_call_id: str) -> str: + """Create a prefixed tool call ID for builtin tools.""" + return f'{BUILTIN_TOOL_CALL_ID_PREFIX}|{provider_name or ""}|{tool_call_id}' + + +def _convert_user_prompt_part(part: UserPromptPart) -> list[UIMessagePart]: + """Convert a UserPromptPart to a list of UI message parts.""" + ui_parts: list[UIMessagePart] = [] + + if isinstance(part.content, str): + 
ui_parts.append(TextUIPart(text=part.content, state='done')) + else: + for item in part.content: + if isinstance(item, str): + ui_parts.append(TextUIPart(text=item, state='done')) + elif isinstance(item, BinaryContent): + ui_parts.append(FileUIPart(url=item.data_uri, media_type=item.media_type)) + elif isinstance( + item, ImageUrl | AudioUrl | VideoUrl | DocumentUrl + ): # pragma: no branch - All content types are covered + ui_parts.append(FileUIPart(url=item.url, media_type=item.media_type)) + + return ui_parts diff --git a/tests/test_ui.py b/tests/test_ui.py index 38f9950ad5..93c311afe1 100644 --- a/tests/test_ui.py +++ b/tests/test_ui.py @@ -87,6 +87,10 @@ class DummyUIAdapter(UIAdapter[DummyUIRunInput, ModelMessage, str, AgentDepsT, O def build_run_input(cls, body: bytes) -> DummyUIRunInput: return DummyUIRunInput.model_validate_json(body) + @classmethod + def dump_messages(cls, messages: Sequence[ModelMessage]) -> list[ModelMessage]: + return list(messages) + @classmethod def load_messages(cls, messages: Sequence[ModelMessage]) -> list[ModelMessage]: return list(messages) diff --git a/tests/test_vercel_ai.py b/tests/test_vercel_ai.py index 085cd38631..eff335d73f 100644 --- a/tests/test_vercel_ai.py +++ b/tests/test_vercel_ai.py @@ -2,6 +2,7 @@ import json from collections.abc import AsyncIterator, MutableMapping +from itertools import count from typing import Any, cast import pytest @@ -48,6 +49,7 @@ from pydantic_ai.ui.vercel_ai import VercelAIAdapter, VercelAIEventStream from pydantic_ai.ui.vercel_ai.request_types import ( DynamicToolOutputAvailablePart, + DynamicToolOutputErrorPart, FileUIPart, ReasoningUIPart, SubmitMessage, @@ -82,6 +84,12 @@ ] +def predictable_id_generator(prefix: str = 'test-id-'): + """Create a predictable ID generator for testing.""" + c = count(1) + return lambda: f'{prefix}{next(c)}' + + @pytest.mark.skipif(not openai_import_successful(), reason='OpenAI not installed') async def test_run(allow_model_requests: None, openai_api_key: str): model = OpenAIResponsesModel('gpt-5', provider=OpenAIProvider(api_key=openai_api_key)) @@ -1832,7 +1840,7 @@ async def test_adapter_load_messages(): UserPromptPart( content=[ 'Here are some files:', - BinaryImage(data=b'fake', media_type='image/png'), + BinaryImage(data=b'fake', media_type='image/png', _identifier='c053ec'), ImageUrl(url='https://example.com/image.png', _media_type='image/png'), VideoUrl(url='https://example.com/video.mp4', _media_type='video/mp4'), AudioUrl(url='https://example.com/audio.mp3', _media_type='audio/mpeg'), @@ -1846,7 +1854,7 @@ async def test_adapter_load_messages(): parts=[ ThinkingPart(content='I should tell the user how nice those files are and share another one'), TextPart(content='Nice files, here is another one:'), - FilePart(content=BinaryImage(data=b'fake', media_type='image/png')), + FilePart(content=BinaryImage(data=b'fake', media_type='image/png', _identifier='c053ec')), ], timestamp=IsDatetime(), ), @@ -1964,3 +1972,605 @@ async def test_adapter_load_messages(): ), ] ) + + +async def test_adapter_dump_messages(): + """Test dumping Pydantic AI messages to Vercel AI format.""" + messages = [ + ModelRequest( + parts=[ + SystemPromptPart(content='You are a helpful assistant.'), + UserPromptPart(content='Hello, world!'), + ] + ), + ModelResponse( + parts=[ + TextPart(content='Hi there!'), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + + # we need to dump the BaseModels to dicts for `IsStr` to work properly in snapshot + ui_message_dicts = [msg.model_dump() for 
msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'system', + 'metadata': None, + 'parts': [ + {'type': 'text', 'text': 'You are a helpful assistant.', 'state': 'done', 'provider_metadata': None} + ], + }, + { + 'id': IsStr(), + 'role': 'user', + 'metadata': None, + 'parts': [{'type': 'text', 'text': 'Hello, world!', 'state': 'done', 'provider_metadata': None}], + }, + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [{'type': 'text', 'text': 'Hi there!', 'state': 'done', 'provider_metadata': None}], + }, + ] + ) + + +async def test_adapter_dump_messages_with_tools(): + """Test dumping messages with tool calls and returns.""" + messages = [ + ModelRequest(parts=[UserPromptPart(content='Search for something')]), + ModelResponse( + parts=[ + TextPart(content='Let me search for that.'), + ToolCallPart( + tool_name='web_search', + args={'query': 'test query'}, + tool_call_id='tool_123', + ), + ] + ), + ModelRequest( + parts=[ + ToolReturnPart( + tool_name='web_search', + content={'results': ['result1', 'result2']}, + tool_call_id='tool_123', + ) + ] + ), + ModelResponse(parts=[TextPart(content='Here are the results.')]), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'user', + 'metadata': None, + 'parts': [{'type': 'text', 'text': 'Search for something', 'state': 'done', 'provider_metadata': None}], + }, + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + {'type': 'text', 'text': 'Let me search for that.', 'state': 'done', 'provider_metadata': None}, + { + 'type': 'dynamic-tool', + 'tool_name': 'web_search', + 'tool_call_id': 'tool_123', + 'state': 'output-available', + 'input': '{"query":"test query"}', + 'output': '{"results":["result1","result2"]}', + 'call_provider_metadata': None, + 'preliminary': None, + }, + ], + }, + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + {'type': 'text', 'text': 'Here are the results.', 'state': 'done', 'provider_metadata': None} + ], + }, + ] + ) + + +async def test_adapter_dump_messages_with_builtin_tools(): + """Test dumping messages with builtin tool calls.""" + messages = [ + ModelRequest(parts=[UserPromptPart(content='Search for something')]), + ModelResponse( + parts=[ + BuiltinToolCallPart( + tool_name='web_search', + args={'query': 'test'}, + tool_call_id='tool_456', + provider_name='openai', + ), + BuiltinToolReturnPart( + tool_name='web_search', + content={'status': 'completed'}, + tool_call_id='tool_456', + provider_name='openai', + ), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'user', + 'metadata': None, + 'parts': [{'type': 'text', 'text': 'Search for something', 'state': 'done', 'provider_metadata': None}], + }, + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + { + 'type': 'tool-web_search', + 'tool_call_id': 'pyd_ai_builtin|openai|tool_456', + 'state': 'output-available', + 'input': '{"query":"test"}', + 'output': '{"status":"completed"}', + 'provider_executed': True, + 'call_provider_metadata': {'pydantic_ai': {'provider_name': 'openai'}}, + 'preliminary': None, + } + ], + }, + ] + ) + + +async def test_adapter_dump_messages_with_thinking(): + """Test dumping messages with 
thinking parts.""" + messages = [ + ModelRequest(parts=[UserPromptPart(content='Tell me something')]), + ModelResponse( + parts=[ + ThinkingPart(content='Let me think about this...'), + TextPart(content='Here is my answer.'), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'user', + 'metadata': None, + 'parts': [{'type': 'text', 'text': 'Tell me something', 'state': 'done', 'provider_metadata': None}], + }, + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + { + 'type': 'reasoning', + 'text': 'Let me think about this...', + 'state': 'done', + 'provider_metadata': None, + }, + {'type': 'text', 'text': 'Here is my answer.', 'state': 'done', 'provider_metadata': None}, + ], + }, + ] + ) + + +async def test_adapter_dump_messages_with_files(): + """Test dumping messages with file parts.""" + messages = [ + ModelRequest( + parts=[ + UserPromptPart( + content=[ + 'Here is an image:', + BinaryImage(data=b'fake_image', media_type='image/png'), + ImageUrl(url='https://example.com/image.png', media_type='image/png'), + ] + ) + ] + ), + ModelResponse( + parts=[ + TextPart(content='Nice image!'), + FilePart(content=BinaryContent(data=b'response_file', media_type='application/pdf')), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + + # Check user message with files + assert ui_messages[0].role == 'user' + assert len(ui_messages[0].parts) == 3 + assert isinstance(ui_messages[0].parts[0], TextUIPart) + assert isinstance(ui_messages[0].parts[1], FileUIPart) + assert ui_messages[0].parts[1].url.startswith('data:image/png;base64,') + assert isinstance(ui_messages[0].parts[2], FileUIPart) + assert ui_messages[0].parts[2].url == 'https://example.com/image.png' + + # Check assistant message with file + assert ui_messages[1].role == 'assistant' + assert isinstance(ui_messages[1].parts[0], TextUIPart) + assert isinstance(ui_messages[1].parts[1], FileUIPart) + assert ui_messages[1].parts[1].url.startswith('data:application/pdf;base64,') + + +async def test_adapter_dump_messages_with_retry(): + """Test dumping messages with retry prompts.""" + messages = [ + ModelRequest(parts=[UserPromptPart(content='Do something')]), + ModelResponse( + parts=[ + ToolCallPart(tool_name='my_tool', args={'arg': 'value'}, tool_call_id='tool_789'), + ] + ), + ModelRequest( + parts=[ + RetryPromptPart( + content='Tool failed with error', + tool_name='my_tool', + tool_call_id='tool_789', + ) + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + + # Check assistant message has tool call with error + assert ui_messages[1].role == 'assistant' + tool_part = ui_messages[1].parts[0] + assert isinstance(tool_part, DynamicToolOutputErrorPart) + assert tool_part.tool_name == 'my_tool' + assert tool_part.state == 'output-error' + assert 'Tool failed with error' in tool_part.error_text + + +async def test_adapter_dump_messages_consecutive_text(): + """Test that consecutive text parts are concatenated correctly.""" + messages = [ + ModelResponse( + parts=[ + TextPart(content='First '), + TextPart(content='second'), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [{'type': 'text', 'text': 'First second', 'state': 
'done', 'provider_metadata': None}], + } + ] + ) + + +async def test_adapter_dump_messages_text_with_interruption(): + """Test text concatenation with interruption.""" + messages = [ + ModelResponse( + parts=[ + TextPart(content='Before tool'), + BuiltinToolCallPart( + tool_name='test', + args={}, + tool_call_id='t1', + provider_name='test', + ), + BuiltinToolReturnPart( + tool_name='test', + content='result', + tool_call_id='t1', + provider_name='test', + ), + TextPart(content='After tool'), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + {'type': 'text', 'text': 'Before tool', 'state': 'done', 'provider_metadata': None}, + { + 'type': 'tool-test', + 'tool_call_id': 'pyd_ai_builtin|test|t1', + 'state': 'output-available', + 'input': '{}', + 'output': 'result', + 'provider_executed': True, + 'call_provider_metadata': {'pydantic_ai': {'provider_name': 'test'}}, + 'preliminary': None, + }, + { + 'type': 'text', + 'text': """\ + + +After tool\ +""", + 'state': 'done', + 'provider_metadata': None, + }, + ], + } + ] + ) + + +async def test_adapter_dump_load_roundtrip(): + """Test that dump_messages and load_messages are approximately inverse operations.""" + original_messages = [ + ModelRequest( + parts=[ + SystemPromptPart(content='System message'), + UserPromptPart(content='User message'), + ] + ), + ModelResponse( + parts=[ + TextPart(content='Response text'), + ToolCallPart(tool_name='tool1', args={'key': 'value'}, tool_call_id='tc1'), + ] + ), + ModelRequest(parts=[ToolReturnPart(tool_name='tool1', content='tool result', tool_call_id='tc1')]), + ModelResponse( + parts=[ + TextPart(content='Final response'), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(original_messages) + + # Load back to Pydantic AI format + reloaded_messages = VercelAIAdapter.load_messages(ui_messages) + + # Can't use `assert reloaded_messages == original_messages` because the timestamps will be different + assert reloaded_messages == snapshot( + [ + ModelRequest( + parts=[ + SystemPromptPart(content='System message', timestamp=IsDatetime()), + UserPromptPart(content='User message', timestamp=IsDatetime()), + ] + ), + ModelResponse( + parts=[ + TextPart(content='Response text'), + ToolCallPart(tool_name='tool1', args={'key': 'value'}, tool_call_id='tc1'), + ], + timestamp=IsDatetime(), + ), + ModelRequest( + parts=[ + ToolReturnPart(tool_name='tool1', content='tool result', tool_call_id='tc1', timestamp=IsDatetime()) + ] + ), + ModelResponse(parts=[TextPart(content='Final response')], timestamp=IsDatetime()), + ] + ) + + +async def test_adapter_dump_messages_text_before_thinking(): + """Test dumping messages where text precedes a thinking part.""" + messages = [ + ModelResponse( + parts=[ + TextPart(content='Let me check.'), + ThinkingPart(content='Okay, I am checking now.'), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + {'type': 'text', 'text': 'Let me check.', 'state': 'done', 'provider_metadata': None}, + { + 'type': 'reasoning', + 'text': 'Okay, I am checking now.', + 'state': 'done', + 'provider_metadata': None, + }, + ], + } + ] + ) + + +async def 
test_adapter_dump_messages_tool_call_without_return(): + """Test dumping messages with a tool call that has no corresponding result.""" + messages = [ + ModelResponse( + parts=[ + ToolCallPart( + tool_name='get_weather', + args={'city': 'New York'}, + tool_call_id='tool_abc', + ), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + { + 'type': 'dynamic-tool', + 'tool_name': 'get_weather', + 'tool_call_id': 'tool_abc', + 'state': 'input-available', + 'input': '{"city":"New York"}', + 'call_provider_metadata': None, + } + ], + } + ] + ) + + +async def test_adapter_dump_messages_assistant_starts_with_tool(): + """Test an assistant message that starts with a tool call instead of text.""" + messages = [ + ModelResponse( + parts=[ + ToolCallPart(tool_name='t', args={}, tool_call_id='tc1'), + TextPart(content='Some text'), + ] + ) + ] + ui_messages = VercelAIAdapter.dump_messages(messages) + + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + { + 'type': 'dynamic-tool', + 'tool_name': 't', + 'tool_call_id': 'tc1', + 'state': 'input-available', + 'input': '{}', + 'call_provider_metadata': None, + }, + { + 'type': 'text', + 'text': """\ + + +Some text\ +""", + 'state': 'done', + 'provider_metadata': None, + }, + ], + } + ] + ) + + +async def test_convert_user_prompt_part_without_urls(): + """Test converting a user prompt with only text and binary content.""" + from pydantic_ai.ui.vercel_ai._adapter import _convert_user_prompt_part # pyright: ignore[reportPrivateUsage] + + part = UserPromptPart(content=['text part', BinaryContent(data=b'data', media_type='application/pdf')]) + ui_parts = _convert_user_prompt_part(part) + assert ui_parts == snapshot( + [ + TextUIPart(text='text part', state='done'), + FileUIPart(media_type='application/pdf', url='data:application/pdf;base64,ZGF0YQ=='), + ] + ) + + +async def test_adapter_dump_messages_file_without_text(): + """Test a file part appearing without any preceding text.""" + messages = [ + ModelResponse( + parts=[ + FilePart(content=BinaryContent(data=b'file_data', media_type='image/png')), + ] + ), + ] + + ui_messages = VercelAIAdapter.dump_messages(messages) + ui_message_dicts = [msg.model_dump() for msg in ui_messages] + + assert ui_message_dicts == snapshot( + [ + { + 'id': IsStr(), + 'role': 'assistant', + 'metadata': None, + 'parts': [ + { + 'type': 'file', + 'media_type': 'image/png', + 'filename': None, + 'url': 'data:image/png;base64,ZmlsZV9kYXRh', + 'provider_metadata': None, + } + ], + } + ] + ) + + +async def test_convert_user_prompt_part_only_urls(): + """Test converting a user prompt with only URL content (no binary).""" + from pydantic_ai.ui.vercel_ai._adapter import _convert_user_prompt_part # pyright: ignore[reportPrivateUsage] + + part = UserPromptPart( + content=[ + ImageUrl(url='https://example.com/img.png', media_type='image/png'), + VideoUrl(url='https://example.com/vid.mp4', media_type='video/mp4'), + ] + ) + ui_parts = _convert_user_prompt_part(part) + assert ui_parts == snapshot( + [ + FileUIPart(media_type='image/png', url='https://example.com/img.png'), + FileUIPart(media_type='video/mp4', url='https://example.com/vid.mp4'), + ] + )
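
Taken together, the new `dump_messages` classmethod is intended as the inverse of `load_messages`: it serializes a Pydantic AI message history into protocol-specific UI messages (here, Vercel AI `UIMessage` objects). A minimal usage sketch, grounded in the tests above; the hand-built history is illustrative and would normally come from a run result (e.g. `result.all_messages()`):

    from pydantic_ai.messages import ModelRequest, ModelResponse, TextPart, UserPromptPart
    from pydantic_ai.ui.vercel_ai import VercelAIAdapter

    # Hand-built history for illustration; in practice, use the messages from an agent run.
    history = [
        ModelRequest(parts=[UserPromptPart(content='Hello, world!')]),
        ModelResponse(parts=[TextPart(content='Hi there!')]),
    ]

    # Serialize to Vercel AI UI messages, e.g. to seed a chat frontend with prior history.
    ui_messages = VercelAIAdapter.dump_messages(history)
    assert [m.role for m in ui_messages] == ['user', 'assistant']

    # load_messages is the approximate inverse: message IDs and timestamps are
    # regenerated, so the round-trip is not byte-for-byte identical.
    restored = VercelAIAdapter.load_messages(ui_messages)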