diff --git a/docs/output.md b/docs/output.md
index 182a753944..99a2a59d45 100644
--- a/docs/output.md
+++ b/docs/output.md
@@ -308,7 +308,7 @@ _(This example is complete, it can be run "as is")_
#### Native Output
-Native Output mode uses a model's native "Structured Outputs" feature (aka "JSON Schema response format"), where the model is forced to only output text matching the provided JSON schema. Note that this is not supported by all models, and sometimes comes with restrictions. For example, Anthropic does not support this at all, and Gemini cannot use tools at the same time as structured output, and attempting to do so will result in an error.
+Native Output mode uses a model's native "Structured Outputs" feature (aka "JSON Schema response format"), where the model is forced to only output text matching the provided JSON schema. Note that this is not supported by all models, and sometimes comes with restrictions. For example, Gemini cannot use tools at the same time as structured output, and attempting to do so will result in an error.
To use this mode, you can wrap the output type(s) in the [`NativeOutput`][pydantic_ai.output.NativeOutput] marker class that also lets you specify a `name` and `description` if the name and docstring of the type or function are not sufficient.
diff --git a/pydantic_ai_slim/pydantic_ai/_json_schema.py b/pydantic_ai_slim/pydantic_ai/_json_schema.py
index 2eb32eb50f..a0360dc7fa 100644
--- a/pydantic_ai_slim/pydantic_ai/_json_schema.py
+++ b/pydantic_ai_slim/pydantic_ai/_json_schema.py
@@ -15,6 +15,9 @@
class JsonSchemaTransformer(ABC):
"""Walks a JSON schema, applying transformations to it at each level.
+ The transformer is called during a model's prepare_request() step to build the JSON schema
+ before it is sent to the model provider.
+
Note: We may eventually want to rework tools to build the JSON schema from the type directly, using a subclass of
pydantic.json_schema.GenerateJsonSchema, rather than making use of this machinery.
"""
@@ -30,8 +33,15 @@ def __init__(
self.schema = schema
self.strict = strict
- self.is_strict_compatible = True # Can be set to False by subclasses to set `strict` on `ToolDefinition` when set not set by user explicitly
+ """The `strict` parameter forces the conversion of the original JSON schema (`self.schema`) of a `ToolDefinition` or `OutputObjectDefinition` to a format supported by the model provider.
+
+ The "strict mode" offered by model providers ensures that the model's output adheres closely to the defined schema. However, not all model providers offer it, and their support for various schema features may differ. For example, a model provider's required schema may not support certain validation constraints like `minLength` or `pattern`.
+ """
+ self.is_strict_compatible = True
+ """Whether the schema is compatible with strict mode.
+ This value is used to set `ToolDefinition.strict` or `OutputObjectDefinition.strict` when their values are `None`.
+ """
self.prefer_inlined_defs = prefer_inlined_defs
self.simplify_nullable_unions = simplify_nullable_unions
diff --git a/pydantic_ai_slim/pydantic_ai/models/__init__.py b/pydantic_ai_slim/pydantic_ai/models/__init__.py
index a9b3789855..b94bf9a078 100644
--- a/pydantic_ai_slim/pydantic_ai/models/__init__.py
+++ b/pydantic_ai_slim/pydantic_ai/models/__init__.py
@@ -57,9 +57,6 @@
Literal[
'anthropic:claude-3-5-haiku-20241022',
'anthropic:claude-3-5-haiku-latest',
- 'anthropic:claude-3-5-sonnet-20240620',
- 'anthropic:claude-3-5-sonnet-20241022',
- 'anthropic:claude-3-5-sonnet-latest',
'anthropic:claude-3-7-sonnet-20250219',
'anthropic:claude-3-7-sonnet-latest',
'anthropic:claude-3-haiku-20240307',
@@ -380,7 +377,10 @@ async def request(
model_settings: ModelSettings | None,
model_request_parameters: ModelRequestParameters,
) -> ModelResponse:
- """Make a request to the model."""
+ """Make a request to the model.
+
+ This is ultimately called by `pydantic_ai._agent_graph.ModelRequestNode._make_request(...)`.
+ """
raise NotImplementedError()
async def count_tokens(
@@ -987,23 +987,27 @@ def get_user_agent() -> str:
return f'pydantic-ai/{__version__}'
-def _customize_tool_def(transformer: type[JsonSchemaTransformer], t: ToolDefinition):
- schema_transformer = transformer(t.parameters_json_schema, strict=t.strict)
+def _customize_tool_def(transformer: type[JsonSchemaTransformer], tool_def: ToolDefinition):
+ """Customize the tool definition using the given transformer.
+
+ If the tool definition has `strict` set to None, the strictness will be inferred from the transformer.
+ """
+ schema_transformer = transformer(tool_def.parameters_json_schema, strict=tool_def.strict)
parameters_json_schema = schema_transformer.walk()
return replace(
- t,
+ tool_def,
parameters_json_schema=parameters_json_schema,
- strict=schema_transformer.is_strict_compatible if t.strict is None else t.strict,
+ strict=schema_transformer.is_strict_compatible if tool_def.strict is None else tool_def.strict,
)
-def _customize_output_object(transformer: type[JsonSchemaTransformer], o: OutputObjectDefinition):
- schema_transformer = transformer(o.json_schema, strict=o.strict)
+def _customize_output_object(transformer: type[JsonSchemaTransformer], output_object: OutputObjectDefinition):
+ schema_transformer = transformer(output_object.json_schema, strict=output_object.strict)
json_schema = schema_transformer.walk()
return replace(
- o,
+ output_object,
json_schema=json_schema,
- strict=schema_transformer.is_strict_compatible if o.strict is None else o.strict,
+ strict=schema_transformer.is_strict_compatible if output_object.strict is None else output_object.strict,
)
diff --git a/pydantic_ai_slim/pydantic_ai/models/anthropic.py b/pydantic_ai_slim/pydantic_ai/models/anthropic.py
index 342c141b9d..f025184119 100644
--- a/pydantic_ai_slim/pydantic_ai/models/anthropic.py
+++ b/pydantic_ai_slim/pydantic_ai/models/anthropic.py
@@ -77,6 +77,7 @@
BetaContentBlockParam,
BetaImageBlockParam,
BetaInputJSONDelta,
+ BetaJSONOutputFormatParam,
BetaMCPToolResultBlock,
BetaMCPToolUseBlock,
BetaMCPToolUseBlockParam,
@@ -205,8 +206,9 @@ def __init__(
model_name: The name of the Anthropic model to use. List of model names available
[here](https://docs.anthropic.com/en/docs/about-claude/models).
provider: The provider to use for the Anthropic API. Can be either the string 'anthropic' or an
- instance of `Provider[AsyncAnthropicClient]`. If not provided, the other parameters will be used.
+ instance of `Provider[AsyncAnthropicClient]`. Defaults to 'anthropic'.
profile: The model profile to use. Defaults to a profile picked by the provider based on the model name.
+ The default 'anthropic' provider will use the default `..profiles.anthropic_model_profile`.
settings: Default model settings for this model instance.
"""
self._model_name = model_name
@@ -296,14 +298,29 @@ def prepare_request(
and thinking.get('type') == 'enabled'
):
if model_request_parameters.output_mode == 'auto':
- model_request_parameters = replace(model_request_parameters, output_mode='prompted')
+ output_mode = 'native' if self.profile.supports_json_schema_output else 'prompted'
+ model_request_parameters = replace(model_request_parameters, output_mode=output_mode)
elif (
model_request_parameters.output_mode == 'tool' and not model_request_parameters.allow_text_output
): # pragma: no branch
# This would result in `tool_choice=required`, which Anthropic does not support with thinking.
+ output_mode = 'NativeOutput' if self.profile.supports_json_schema_output else 'PromptedOutput'
raise UserError(
- 'Anthropic does not support thinking and output tools at the same time. Use `output_type=PromptedOutput(...)` instead.'
+ f'Anthropic does not support thinking and output tools at the same time. Use `output_type={output_mode}(...)` instead.'
)
+
+ # NOTE forcing `strict=True` here is a bit eager, because the transformer may still determine that the transformation is lossy.
+ # so we're relying on anthropic's strict mode being better than prompting the model with pydantic's schema
+ if model_request_parameters.output_mode == 'native' and model_request_parameters.output_object is not None:
+ # force strict=True for native output
+ # this needs to be done here because `super().prepare_request` calls
+ # -> Model.customize_request_parameters(model_request_parameters) which calls
+ # -> -> _customize_output_object(transformer: type[JsonSchemaTransformer], output_object: OutputObjectDefinition)
+ # which finally instantiates the transformer (default AnthropicJsonSchemaTransformer)
+ # `schema_transformer = transformer(output_object.json_schema, strict=output_object.strict)`
+ model_request_parameters = replace(
+ model_request_parameters, output_object=replace(model_request_parameters.output_object, strict=True)
+ )
return super().prepare_request(model_settings, model_request_parameters)
@overload
@@ -333,16 +350,24 @@ async def _messages_create(
model_settings: AnthropicModelSettings,
model_request_parameters: ModelRequestParameters,
) -> BetaMessage | AsyncStream[BetaRawMessageStreamEvent]:
- # standalone function to make it easier to override
+ """Calls the Anthropic API to create a message.
+
+ This is the last step before sending the request to the API.
+ Most preprocessing has happened in `prepare_request()`.
+ """
tools = self._get_tools(model_request_parameters, model_settings)
- tools, mcp_servers, beta_features = self._add_builtin_tools(tools, model_request_parameters)
+ tools, mcp_servers, builtin_tool_betas = self._add_builtin_tools(tools, model_request_parameters)
+ output_format = self._native_output_format(model_request_parameters)
tool_choice = self._infer_tool_choice(tools, model_settings, model_request_parameters)
system_prompt, anthropic_messages = await self._map_message(messages, model_request_parameters, model_settings)
+ betas_set = self._get_required_betas(tools, model_request_parameters)
+ betas_set.update(builtin_tool_betas)
+
try:
- extra_headers = self._map_extra_headers(beta_features, model_settings)
+ betas, extra_headers = self._prepare_betas_and_headers(betas_set, model_settings)
return await self.client.beta.messages.create(
max_tokens=model_settings.get('max_tokens', 4096),
@@ -352,6 +377,8 @@ async def _messages_create(
tools=tools or OMIT,
tool_choice=tool_choice or OMIT,
mcp_servers=mcp_servers or OMIT,
+ output_format=output_format or OMIT,
+ betas=betas or OMIT,
stream=stream,
thinking=model_settings.get('anthropic_thinking', OMIT),
stop_sequences=model_settings.get('stop_sequences', OMIT),
@@ -380,14 +407,18 @@ async def _messages_count_tokens(
# standalone function to make it easier to override
tools = self._get_tools(model_request_parameters, model_settings)
- tools, mcp_servers, beta_features = self._add_builtin_tools(tools, model_request_parameters)
+ tools, mcp_servers, builtin_tool_betas = self._add_builtin_tools(tools, model_request_parameters)
+ output_format = self._native_output_format(model_request_parameters)
tool_choice = self._infer_tool_choice(tools, model_settings, model_request_parameters)
system_prompt, anthropic_messages = await self._map_message(messages, model_request_parameters, model_settings)
+ betas = self._get_required_betas(tools, model_request_parameters)
+ betas.update(builtin_tool_betas)
+
try:
- extra_headers = self._map_extra_headers(beta_features, model_settings)
+ betas_list, extra_headers = self._prepare_betas_and_headers(betas, model_settings)
return await self.client.beta.messages.count_tokens(
system=system_prompt or OMIT,
@@ -396,6 +427,8 @@ async def _messages_count_tokens(
tools=tools or OMIT,
tool_choice=tool_choice or OMIT,
mcp_servers=mcp_servers or OMIT,
+ betas=betas_list or OMIT,
+ output_format=output_format or OMIT,
thinking=model_settings.get('anthropic_thinking', OMIT),
timeout=model_settings.get('timeout', NOT_GIVEN),
extra_headers=extra_headers,
@@ -497,10 +530,31 @@ def _get_tools(
return tools
+ def _get_required_betas(
+ self, tools: list[BetaToolUnionParam], model_request_parameters: ModelRequestParameters
+ ) -> set[str]:
+ """Determine which beta features are needed based on tools and output format.
+
+ Args:
+ tools: The transformed tool dictionaries that will be sent to the API
+ model_request_parameters: Model request parameters containing output settings
+
+ Returns:
+ Set of beta feature strings (naturally deduplicated)
+ """
+ betas: set[str] = set()
+
+ has_strict_tools = any(tool.get('strict') for tool in tools)
+
+ if has_strict_tools or model_request_parameters.output_mode == 'native':
+ betas.add('structured-outputs-2025-11-13')
+
+ return betas
+
def _add_builtin_tools(
self, tools: list[BetaToolUnionParam], model_request_parameters: ModelRequestParameters
- ) -> tuple[list[BetaToolUnionParam], list[BetaRequestMCPServerURLDefinitionParam], list[str]]:
- beta_features: list[str] = []
+ ) -> tuple[list[BetaToolUnionParam], list[BetaRequestMCPServerURLDefinitionParam], set[str]]:
+ beta_features: set[str] = set()
mcp_servers: list[BetaRequestMCPServerURLDefinitionParam] = []
for tool in model_request_parameters.builtin_tools:
if isinstance(tool, WebSearchTool):
@@ -517,14 +571,14 @@ def _add_builtin_tools(
)
elif isinstance(tool, CodeExecutionTool): # pragma: no branch
tools.append(BetaCodeExecutionTool20250522Param(name='code_execution', type='code_execution_20250522'))
- beta_features.append('code-execution-2025-05-22')
+ beta_features.add('code-execution-2025-05-22')
elif isinstance(tool, MemoryTool): # pragma: no branch
if 'memory' not in model_request_parameters.tool_defs:
raise UserError("Built-in `MemoryTool` requires a 'memory' tool to be defined.")
# Replace the memory tool definition with the built-in memory tool
tools = [tool for tool in tools if tool['name'] != 'memory']
tools.append(BetaMemoryTool20250818Param(name='memory', type='memory_20250818'))
- beta_features.append('context-management-2025-06-27')
+ beta_features.add('context-management-2025-06-27')
elif isinstance(tool, MCPServerTool) and tool.url:
mcp_server_url_definition_param = BetaRequestMCPServerURLDefinitionParam(
type='url',
@@ -539,7 +593,7 @@ def _add_builtin_tools(
if tool.authorization_token: # pragma: no cover
mcp_server_url_definition_param['authorization_token'] = tool.authorization_token
mcp_servers.append(mcp_server_url_definition_param)
- beta_features.append('mcp-client-2025-04-04')
+ beta_features.add('mcp-client-2025-04-04')
else: # pragma: no cover
raise UserError(
f'`{tool.__class__.__name__}` is not supported by `AnthropicModel`. If it should be, please file an issue.'
@@ -567,15 +621,28 @@ def _infer_tool_choice(
return tool_choice
- def _map_extra_headers(self, beta_features: list[str], model_settings: AnthropicModelSettings) -> dict[str, str]:
- """Apply beta_features to extra_headers in model_settings."""
+ def _prepare_betas_and_headers(
+ self, betas: set[str], model_settings: AnthropicModelSettings
+ ) -> tuple[list[str], dict[str, str]]:
+ """Prepare beta features list and extra headers for API request.
+
+ Handles merging custom anthropic-beta header from extra_headers into betas set
+ and ensuring User-Agent is set.
+
+ Args:
+ betas: Set of beta feature strings (naturally deduplicated)
+ model_settings: Model settings containing extra_headers
+
+ Returns:
+ Tuple of (betas list, extra_headers dict)
+ """
extra_headers = model_settings.get('extra_headers', {})
extra_headers.setdefault('User-Agent', get_user_agent())
- if beta_features:
- if 'anthropic-beta' in extra_headers:
- beta_features.insert(0, extra_headers['anthropic-beta'])
- extra_headers['anthropic-beta'] = ','.join(beta_features)
- return extra_headers
+
+ if beta_header := extra_headers.pop('anthropic-beta', None):
+ betas.update({stripped_beta for beta in beta_header.split(',') if (stripped_beta := beta.strip())})
+
+ return sorted(betas), extra_headers
async def _map_message( # noqa: C901
self,
@@ -846,13 +913,23 @@ async def _map_user_prompt(
else:
raise RuntimeError(f'Unsupported content type: {type(item)}') # pragma: no cover
- @staticmethod
- def _map_tool_definition(f: ToolDefinition) -> BetaToolParam:
- return {
+ def _map_tool_definition(self, f: ToolDefinition) -> BetaToolParam:
+ """Maps a `ToolDefinition` dataclass to an Anthropic `BetaToolParam` dictionary."""
+ tool_param: BetaToolParam = {
'name': f.name,
'description': f.description or '',
'input_schema': f.parameters_json_schema,
}
+ if f.strict and self.profile.supports_json_schema_output: # pragma: no branch
+ tool_param['strict'] = f.strict
+ return tool_param
+
+ @staticmethod
+ def _native_output_format(model_request_parameters: ModelRequestParameters) -> BetaJSONOutputFormatParam | None:
+ if model_request_parameters.output_mode != 'native':
+ return None
+ assert model_request_parameters.output_object is not None
+ return {'type': 'json_schema', 'schema': model_request_parameters.output_object.json_schema}
def _map_usage(
diff --git a/pydantic_ai_slim/pydantic_ai/models/openai.py b/pydantic_ai_slim/pydantic_ai/models/openai.py
index 6db3742320..338482608e 100644
--- a/pydantic_ai_slim/pydantic_ai/models/openai.py
+++ b/pydantic_ai_slim/pydantic_ai/models/openai.py
@@ -1569,7 +1569,7 @@ async def _map_messages( # noqa: C901
param['id'] = id
openai_messages.append(param)
elif isinstance(item, BuiltinToolCallPart):
- if item.provider_name == self.system and send_item_ids:
+ if item.provider_name == self.system and send_item_ids: # pragma: no branch
if (
item.tool_name == CodeExecutionTool.kind
and item.tool_call_id
@@ -1639,7 +1639,7 @@ async def _map_messages( # noqa: C901
openai_messages.append(mcp_call_item)
elif isinstance(item, BuiltinToolReturnPart):
- if item.provider_name == self.system and send_item_ids:
+ if item.provider_name == self.system and send_item_ids: # pragma: no branch
if (
item.tool_name == CodeExecutionTool.kind
and code_interpreter_item is not None
diff --git a/pydantic_ai_slim/pydantic_ai/profiles/__init__.py b/pydantic_ai_slim/pydantic_ai/profiles/__init__.py
index dace9f2b32..84a1c04012 100644
--- a/pydantic_ai_slim/pydantic_ai/profiles/__init__.py
+++ b/pydantic_ai_slim/pydantic_ai/profiles/__init__.py
@@ -25,9 +25,17 @@ class ModelProfile:
supports_tools: bool = True
"""Whether the model supports tools."""
supports_json_schema_output: bool = False
- """Whether the model supports JSON schema output."""
+ """Whether the model supports JSON schema output.
+
+ This is also referred to as 'native' support for structured output.
+ Relates to the `NativeOutput` output type.
+ """
supports_json_object_output: bool = False
- """Whether the model supports JSON object output."""
+ """Whether the model supports a dedicated mode to enforce JSON output, without necessarily sending a schema.
+
+ E.g. [OpenAI's JSON mode](https://platform.openai.com/docs/guides/structured-outputs#json-mode)
+ Relates to the `PromptedOutput` output type.
+ """
supports_image_output: bool = False
"""Whether the model supports image output."""
default_structured_output_mode: StructuredOutputMode = 'tool'
diff --git a/pydantic_ai_slim/pydantic_ai/profiles/anthropic.py b/pydantic_ai_slim/pydantic_ai/profiles/anthropic.py
index f6a2755819..1b683ea6c0 100644
--- a/pydantic_ai_slim/pydantic_ai/profiles/anthropic.py
+++ b/pydantic_ai_slim/pydantic_ai/profiles/anthropic.py
@@ -1,8 +1,155 @@
from __future__ import annotations as _annotations
+from copy import deepcopy
+from dataclasses import dataclass
+
+from typing_extensions import assert_never
+
+from .._json_schema import JsonSchema, JsonSchemaTransformer
from . import ModelProfile
def anthropic_model_profile(model_name: str) -> ModelProfile | None:
- """Get the model profile for an Anthropic model."""
- return ModelProfile(thinking_tags=('', ''))
+ """Get the model profile for an Anthropic model.
+
+ The profile is set as soon as the model is instantiated.
+ """
+ models_that_support_json_schema_output = ('claude-sonnet-4-5', 'claude-opus-4-1')
+ # anthropic introduced support for both structured outputs and strict tool use
+ # https://docs.claude.com/en/docs/build-with-claude/structured-outputs#example-usage
+ supports_json_schema_output = model_name.startswith(models_that_support_json_schema_output)
+ return ModelProfile(
+ thinking_tags=('', ''),
+ supports_json_schema_output=supports_json_schema_output,
+ json_schema_transformer=AnthropicJsonSchemaTransformer,
+ )
+
+
+@dataclass(init=False)
+class AnthropicJsonSchemaTransformer(JsonSchemaTransformer):
+ """Transforms schemas to the subset supported by Anthropic structured outputs.
+
+ The transformer is applied (if it is applied) when the [AnthropicModel.prepare_request](../pydantic_ai_slim/pydantic_ai/models/anthropic.py) is called.
+
+ Anthropic's SDK `transform_schema()` automatically:
+ - Adds `additionalProperties: false` to all objects (required by API)
+ - Removes unsupported constraints (minLength, pattern, etc.)
+ - Moves removed constraints to description field
+ - Removes title and $schema fields
+
+ When `strict=None`, we compare before/after to detect if constraints were dropped.
+ """
+
+ def walk(self) -> JsonSchema:
+ from anthropic import transform_schema
+
+ schema = super().walk()
+
+ # no transformation if specifically non-strict
+ if self.strict is False:
+ return schema
+ else:
+ transformed = transform_schema(schema)
+ has_lossy_changes = self._has_lossy_changes(schema, transformed)
+ self.is_strict_compatible = not has_lossy_changes
+
+ # this is the default
+ if self.strict is None:
+ # is_strict_compatible sets the ToolDefinition.strict value when tool_def.strict is None
+ return transformed if self.is_strict_compatible else schema
+ else:
+ # strict=True
+ # self.is_strict_compatible won't be used by caller bc strict is explicit
+ return transformed
+
+ def transform(self, schema: JsonSchema) -> JsonSchema:
+ schema.pop('title', None)
+ schema.pop('$schema', None)
+ return schema
+
+ @staticmethod
+ def _has_lossy_changes(before: JsonSchema, after: JsonSchema) -> bool: # noqa: C901
+ """Check if transformation dropped validation constraints.
+
+ Safe changes that don't count as lossy:
+ - Adding additionalProperties: false
+ - Removing title, $schema, or other metadata fields
+ - Reordering keys
+
+ Lossy changes:
+ - Removing validation constraints (minLength, pattern, minimum, etc.)
+ - Changing constraint values
+ - Moving constraints to description field
+ """
+
+ def normalize(schema: JsonSchema) -> JsonSchema:
+ """Remove fields that are safe to add/remove."""
+ normalized = deepcopy(schema)
+ normalized.pop('additionalProperties', None)
+ normalized.pop('title', None)
+ normalized.pop('$schema', None)
+ return normalized
+
+ def has_lossy_object_changes(before_obj: JsonSchema, after_obj: JsonSchema) -> bool:
+ """Recursively check for lossy changes in object schemas.
+
+ Returns:
+ True if validation constraints were removed or modified (lossy changes detected).
+ False if all validation constraints are preserved (no lossy changes).
+ """
+ validation_keys = {
+ 'minLength',
+ 'maxLength',
+ 'pattern',
+ 'format',
+ 'minimum',
+ 'maximum',
+ 'exclusiveMinimum',
+ 'exclusiveMaximum',
+ 'minItems',
+ 'maxItems',
+ 'uniqueItems',
+ 'minProperties',
+ 'maxProperties',
+ }
+
+ for key in validation_keys:
+ if key in before_obj and key not in after_obj:
+ return True
+ # should never happen that an sdk modifies a constraint value
+ if key in before_obj and key in after_obj and before_obj[key] != after_obj[key]:
+ return True # pragma: no cover
+
+ before_props = before_obj.get('properties', {})
+ after_props = after_obj.get('properties', {})
+ for prop_name, before_prop in before_props.items():
+ if prop_name in after_props: # pragma: no branch
+ if has_lossy_schema_changes(before_prop, after_props[prop_name]):
+ return True
+
+ if 'items' in before_obj and 'items' in after_obj:
+ if has_lossy_schema_changes(before_obj['items'], after_obj['items']):
+ return True
+
+ before_defs = before_obj.get('$defs', {})
+ after_defs = after_obj.get('$defs', {})
+ for def_name, before_def in before_defs.items():
+ if def_name in after_defs: # pragma: no branch
+ if has_lossy_schema_changes(before_def, after_defs[def_name]): # pragma: no branch
+ return True
+
+ return False
+
+ def has_lossy_schema_changes(before_schema: JsonSchema, after_schema: JsonSchema) -> bool:
+ """Check a single schema object for lossy changes.
+
+ Returns:
+ True if validation constraints were removed or modified (lossy changes detected).
+ False if all validation constraints are preserved (no lossy changes).
+ """
+ if isinstance(before_schema, dict) and isinstance(after_schema, dict):
+ return has_lossy_object_changes(before_schema, after_schema)
+ # schemas should always be dicts
+ assert_never(False)
+
+ return has_lossy_schema_changes(normalize(before), normalize(after))
diff --git a/pydantic_ai_slim/pydantic_ai/tools.py b/pydantic_ai_slim/pydantic_ai/tools.py
index ca72cafbb5..e54b829bfb 100644
--- a/pydantic_ai_slim/pydantic_ai/tools.py
+++ b/pydantic_ai_slim/pydantic_ai/tools.py
@@ -480,7 +480,7 @@ class ToolDefinition:
When `False`, the model may be free to generate other properties or types (depending on the vendor).
When `None` (the default), the value will be inferred based on the compatibility of the parameters_json_schema.
- Note: this is currently only supported by OpenAI models.
+ Note: this is currently supported by OpenAI and Anthropic models.
"""
sequential: bool = False
diff --git a/pydantic_ai_slim/pyproject.toml b/pydantic_ai_slim/pyproject.toml
index 2059991bd0..9f625e80b4 100644
--- a/pydantic_ai_slim/pyproject.toml
+++ b/pydantic_ai_slim/pyproject.toml
@@ -71,7 +71,7 @@ openai = ["openai>=1.107.2"]
cohere = ["cohere>=5.18.0; platform_system != 'Emscripten'"]
vertexai = ["google-auth>=2.36.0", "requests>=2.32.2"]
google = ["google-genai>=1.51.0"]
-anthropic = ["anthropic>=0.70.0"]
+anthropic = ["anthropic>=0.74.0"]
groq = ["groq>=0.25.0"]
openrouter = ["openai>=2.8.0"]
mistral = ["mistralai>=1.9.10"]
diff --git a/pyproject.toml b/pyproject.toml
index 1b28c22490..9119c286cf 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -177,6 +177,7 @@ convention = "google"
[tool.ruff.lint.flake8-tidy-imports.banned-api]
"typing.TypedDict".msg = "Use typing_extensions.TypedDict instead."
+"typing.assert_never".msg = "Use typing_extensions.assert_never instead."
[tool.ruff.format]
# don't format python in docstrings, pytest-examples takes care of it
diff --git a/tests/models/anthropic/__init__.py b/tests/models/anthropic/__init__.py
new file mode 100644
index 0000000000..1cad9fd8b4
--- /dev/null
+++ b/tests/models/anthropic/__init__.py
@@ -0,0 +1 @@
+"""Tests for Anthropic models."""
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_auto-lossless-FORCES_TRUE].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_auto-lossless-FORCES_TRUE].yaml
new file mode 100644
index 0000000000..cf63b64596
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_auto-lossless-FORCES_TRUE].yaml
@@ -0,0 +1,73 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '308'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-5
+ output_format:
+ schema:
+ additionalProperties: false
+ properties:
+ location:
+ type: string
+ required:
+ - location
+ type: object
+ type: json_schema
+ stream: false
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '460'
+ content-type:
+ - application/json
+ retry-after:
+ - '56'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: '{"location":"https://docs.pydantic.dev/logfire/"}'
+ type: text
+ id: msg_018rPREGnp2cPYrvQQ6uBAa2
+ model: claude-sonnet-4-5-20250929
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 154
+ output_tokens: 20
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_auto-lossy-FORCES_TRUE].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_auto-lossy-FORCES_TRUE].yaml
new file mode 100644
index 0000000000..742536abbd
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_auto-lossy-FORCES_TRUE].yaml
@@ -0,0 +1,74 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '358'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-5
+ output_format:
+ schema:
+ additionalProperties: false
+ properties:
+ username:
+ description: '{minLength: 3, pattern: ^[a-z]+$}'
+ type: string
+ required:
+ - username
+ type: object
+ type: json_schema
+ stream: false
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '432'
+ content-type:
+ - application/json
+ retry-after:
+ - '53'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: '{"username":"logfire"}'
+ type: text
+ id: msg_01MQCTfXMrrb99zBoJD95ksW
+ model: claude-sonnet-4-5-20250929
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 176
+ output_tokens: 9
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_false-lossless-FORCES_TRUE].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_false-lossless-FORCES_TRUE].yaml
new file mode 100644
index 0000000000..5f33328224
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_false-lossless-FORCES_TRUE].yaml
@@ -0,0 +1,73 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '308'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-5
+ output_format:
+ schema:
+ additionalProperties: false
+ properties:
+ location:
+ type: string
+ required:
+ - location
+ type: object
+ type: json_schema
+ stream: false
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '460'
+ content-type:
+ - application/json
+ retry-after:
+ - '1'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: '{"location":"https://docs.pydantic.dev/logfire/"}'
+ type: text
+ id: msg_01MrDR1fEhtHhLje7Jq6R2b1
+ model: claude-sonnet-4-5-20250929
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 154
+ output_tokens: 20
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_false-lossy-FORCES_TRUE].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_false-lossy-FORCES_TRUE].yaml
new file mode 100644
index 0000000000..ab18e2ad7b
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_false-lossy-FORCES_TRUE].yaml
@@ -0,0 +1,74 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '358'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-5
+ output_format:
+ schema:
+ additionalProperties: false
+ properties:
+ username:
+ description: '{minLength: 3, pattern: ^[a-z]+$}'
+ type: string
+ required:
+ - username
+ type: object
+ type: json_schema
+ stream: false
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '432'
+ content-type:
+ - application/json
+ retry-after:
+ - '60'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: '{"username":"logfire"}'
+ type: text
+ id: msg_018rQpShnVwnDYxoHHmi713w
+ model: claude-sonnet-4-5-20250929
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 176
+ output_tokens: 9
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_true-lossless].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_true-lossless].yaml
new file mode 100644
index 0000000000..9dd36aad83
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_true-lossless].yaml
@@ -0,0 +1,73 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '308'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-5
+ output_format:
+ schema:
+ additionalProperties: false
+ properties:
+ location:
+ type: string
+ required:
+ - location
+ type: object
+ type: json_schema
+ stream: false
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '460'
+ content-type:
+ - application/json
+ retry-after:
+ - '15'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: '{"location":"https://docs.pydantic.dev/logfire/"}'
+ type: text
+ id: msg_01CF2AS56DM5HsBwUCzKJqN9
+ model: claude-sonnet-4-5-20250929
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 154
+ output_tokens: 20
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_true-lossy].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_true-lossy].yaml
new file mode 100644
index 0000000000..6aad26f782
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_native-strict_true-lossy].yaml
@@ -0,0 +1,74 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '358'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-5
+ output_format:
+ schema:
+ additionalProperties: false
+ properties:
+ username:
+ description: '{minLength: 3, pattern: ^[a-z]+$}'
+ type: string
+ required:
+ - username
+ type: object
+ type: json_schema
+ stream: false
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '432'
+ content-type:
+ - application/json
+ retry-after:
+ - '10'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: '{"username":"logfire"}'
+ type: text
+ id: msg_01CCAG4DgsdTcF7BFuT6Ewd7
+ model: claude-sonnet-4-5-20250929
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 176
+ output_tokens: 9
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_auto-lossless-AUTO_ENABLED].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_auto-lossless-AUTO_ENABLED].yaml
new file mode 100644
index 0000000000..a7988e18aa
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_auto-lossless-AUTO_ENABLED].yaml
@@ -0,0 +1,96 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '452'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-5
+ stream: false
+ tool_choice:
+ type: auto
+ tools:
+ - description: Simple schema with no validation constraints - fully strict-compatible.
+ input_schema:
+ additionalProperties: false
+ properties:
+ location:
+ type: string
+ required:
+ - location
+ type: object
+ name: pydantic_questions_tool
+ strict: true
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '1916'
+ content-type:
+ - application/json
+ retry-after:
+ - '57'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: |-
+ Logfire is a modern observability platform created by Pydantic (the team behind the popular Pydantic Python library). It's designed to help developers monitor, debug, and understand their applications through logging, tracing, and metrics.
+
+ Key features of Logfire include:
+
+ 1. **Deep Python Integration** - Built with Python-first design, offering excellent integration with Python applications and frameworks
+
+ 2. **Structured Logging** - Uses structured logs that are easy to query and analyze, rather than plain text logs
+
+ 3. **Distributed Tracing** - Tracks requests as they flow through different services and components of your application
+
+ 4. **Performance Monitoring** - Helps identify bottlenecks and performance issues in your code
+
+ 5. **Built on OpenTelemetry** - Uses the open standard OpenTelemetry protocol, ensuring compatibility and avoiding vendor lock-in
+
+ 6. **User-Friendly Interface** - Provides an intuitive web interface for exploring and analyzing your application data
+
+ 7. **Pydantic Integration** - Naturally integrates with Pydantic models and validation, making it easy to see validation errors and data flow
+
+ Logfire aims to be a modern alternative to traditional logging and monitoring solutions, offering better developer experience and more powerful debugging capabilities, especially for Python applications. It competes with services like Datadog, New Relic, and other observability platforms but with a focus on simplicity and Python ecosystem integration.
+ type: text
+ id: msg_01RMhzpMpDVNWQfn8hd1rGSt
+ model: claude-sonnet-4-5-20250929
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 579
+ output_tokens: 315
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_auto-lossy-NOT_AUTO_ENABLED].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_auto-lossy-NOT_AUTO_ENABLED].yaml
new file mode 100644
index 0000000000..503dc696c6
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_auto-lossy-NOT_AUTO_ENABLED].yaml
@@ -0,0 +1,96 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '449'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-5
+ stream: false
+ tool_choice:
+ type: auto
+ tools:
+ - description: Schema with validation constraints that get dropped - not strict-compatible.
+ input_schema:
+ properties:
+ username:
+ minLength: 3
+ pattern: ^[a-z]+$
+ type: string
+ required:
+ - username
+ type: object
+ name: pydantic_questions_tool
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '1815'
+ content-type:
+ - application/json
+ retry-after:
+ - '50'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: |-
+ Logfire is a observability platform developed by Pydantic, the team behind the popular Python data validation library. It's designed to help developers monitor, debug, and understand their applications through logging, tracing, and metrics.
+
+ Key features of Logfire include:
+
+ 1. **Built on OpenTelemetry** - Uses the industry-standard OpenTelemetry protocol for collecting telemetry data
+
+ 2. **Python-first design** - Developed by the Pydantic team with Python developers in mind, offering excellent Python integration
+
+ 3. **Structured logging** - Provides structured, searchable logs rather than plain text logs
+
+ 4. **Distributed tracing** - Tracks requests as they flow through distributed systems and microservices
+
+ 5. **Performance monitoring** - Helps identify bottlenecks and performance issues in applications
+
+ 6. **Easy integration** - Designed to integrate smoothly with popular Python frameworks like FastAPI, Django, Flask, and of course Pydantic
+
+ 7. **Developer-friendly UI** - Offers an intuitive interface for exploring and analyzing telemetry data
+
+ Logfire aims to make observability more accessible and easier to implement, particularly for Python developers, while leveraging open standards like OpenTelemetry to avoid vendor lock-in. It competes with other observability platforms like Datadog, New Relic, and Honeycomb, but with a specific focus on the Python ecosystem.
+ type: text
+ id: msg_01HCkaLLk6ZrEMLQwioeiVgc
+ model: claude-sonnet-4-5-20250929
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 592
+ output_tokens: 313
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_false-lossless].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_false-lossless].yaml
new file mode 100644
index 0000000000..0db4e08f64
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_false-lossless].yaml
@@ -0,0 +1,88 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '409'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-5
+ stream: false
+ tool_choice:
+ type: auto
+ tools:
+ - description: Simple schema with no validation constraints - fully strict-compatible.
+ input_schema:
+ properties:
+ location:
+ type: string
+ required:
+ - location
+ type: object
+ name: pydantic_questions_tool
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '1623'
+ content-type:
+ - application/json
+ retry-after:
+ - '39'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: |-
+ Logfire is a observability platform developed by Pydantic, the team behind the popular Python data validation library. It's designed to help developers monitor, debug, and understand their applications better.
+
+ Key features of Logfire include:
+
+ 1. **Observability**: Provides comprehensive logging, tracing, and metrics for Python applications
+ 2. **Integration with Pydantic**: Deep integration with Pydantic models for automatic instrumentation and validation tracking
+ 3. **Developer Experience**: Designed with a focus on ease of use and developer-friendly interfaces
+ 4. **Real-time Monitoring**: Allows you to see what's happening in your applications in real-time
+ 5. **Debugging Tools**: Helps identify and diagnose issues in production and development environments
+
+ Logfire aims to make observability more accessible and easier to implement for Python developers, especially those already using the Pydantic ecosystem. It competes with other observability platforms like Datadog, New Relic, and open-source solutions like OpenTelemetry.
+
+ The platform is relatively new (launched in 2024) and represents Pydantic's expansion beyond just data validation into the broader application monitoring space.
+ type: text
+ id: msg_01NtGrtZjVeodXfPppGQPWUy
+ model: claude-sonnet-4-5-20250929
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 572
+ output_tokens: 258
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_false-lossy].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_false-lossy].yaml
new file mode 100644
index 0000000000..e47b40360e
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_false-lossy].yaml
@@ -0,0 +1,96 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '449'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-5
+ stream: false
+ tool_choice:
+ type: auto
+ tools:
+ - description: Schema with validation constraints that get dropped - not strict-compatible.
+ input_schema:
+ properties:
+ username:
+ minLength: 3
+ pattern: ^[a-z]+$
+ type: string
+ required:
+ - username
+ type: object
+ name: pydantic_questions_tool
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '1910'
+ content-type:
+ - application/json
+ retry-after:
+ - '31'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: |-
+ Logfire is a observability platform developed by Pydantic (the team behind the popular Pydantic Python library for data validation). It's designed to provide modern application monitoring, logging, and debugging capabilities.
+
+ Key features of Logfire include:
+
+ 1. **Built for Python**: While it can work with other languages, it's particularly well-integrated with Python applications and the Pydantic ecosystem.
+
+ 2. **Observability**: It provides logging, tracing, and metrics in one platform, helping developers understand what's happening in their applications.
+
+ 3. **Developer-friendly**: Designed with a focus on great developer experience, making it easy to instrument code and understand the data.
+
+ 4. **OpenTelemetry-based**: Built on open standards (OpenTelemetry), so you're not locked into a proprietary system.
+
+ 5. **SQL querying**: Allows you to query your observability data using SQL, making it flexible for analysis.
+
+ 6. **Performance monitoring**: Helps track application performance, identify bottlenecks, and debug issues in production.
+
+ 7. **Integration with Pydantic**: Offers special integration with Pydantic models for automatic structured logging and validation tracking.
+
+ Logfire competes with other observability platforms like Datadog, New Relic, and Sentry, but with a particular focus on Python developers and modern development workflows. It was announced and released by Pydantic in 2024 as a commercial product to complement their open-source library.
+ type: text
+ id: msg_01KkFEEFvQ7ps3sgCNoMyG4q
+ model: claude-sonnet-4-5-20250929
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 592
+ output_tokens: 330
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_true-lossless].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_true-lossless].yaml
new file mode 100644
index 0000000000..00e04647a6
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_true-lossless].yaml
@@ -0,0 +1,96 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '452'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-5
+ stream: false
+ tool_choice:
+ type: auto
+ tools:
+ - description: Simple schema with no validation constraints - fully strict-compatible.
+ input_schema:
+ additionalProperties: false
+ properties:
+ location:
+ type: string
+ required:
+ - location
+ type: object
+ name: pydantic_questions_tool
+ strict: true
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '1730'
+ content-type:
+ - application/json
+ retry-after:
+ - '32'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: |-
+ Logfire is a observability platform created by Pydantic, the team behind the popular Python data validation library. It's designed to help developers monitor, debug, and understand their applications through logging, tracing, and metrics.
+
+ Key features of Logfire include:
+
+ 1. **Built for Python**: Designed with Python developers in mind, with deep integration with the Python ecosystem
+
+ 2. **Structured Logging**: Provides structured, searchable logs that make it easier to find and analyze information
+
+ 3. **Distributed Tracing**: Tracks requests as they flow through different services and components of your application
+
+ 4. **Performance Monitoring**: Helps identify bottlenecks and performance issues in your code
+
+ 5. **User-Friendly Interface**: Offers an intuitive web interface for exploring and analyzing your application's behavior
+
+ 6. **Integration with Pydantic**: Works seamlessly with Pydantic models for automatic instrumentation and validation
+
+ 7. **OpenTelemetry Compatible**: Built on open standards, making it interoperable with other observability tools
+
+ Logfire aims to make observability more accessible and developer-friendly, particularly for Python applications, by providing a modern platform that combines the power of structured logging with distributed tracing capabilities.
+ type: text
+ id: msg_01PqRzJ3Pe7ZmENBDcfm8YoN
+ model: claude-sonnet-4-5-20250929
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 579
+ output_tokens: 269
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_true-lossy].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_true-lossy].yaml
new file mode 100644
index 0000000000..bb1056127d
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[supported_tool-strict_true-lossy].yaml
@@ -0,0 +1,97 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '507'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-5
+ stream: false
+ tool_choice:
+ type: auto
+ tools:
+ - description: Schema with validation constraints that get dropped - not strict-compatible.
+ input_schema:
+ additionalProperties: false
+ properties:
+ username:
+ description: '{minLength: 3, pattern: ^[a-z]+$}'
+ type: string
+ required:
+ - username
+ type: object
+ name: pydantic_questions_tool
+ strict: true
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '1717'
+ content-type:
+ - application/json
+ retry-after:
+ - '15'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: |-
+ Logfire is a observability platform created by Pydantic (the company behind the popular Python data validation library). It's designed to help developers monitor, debug, and understand their applications through logging, tracing, and performance monitoring.
+
+ Key features of Logfire include:
+
+ 1. **Deep Python Integration** - Built by the Pydantic team, it has excellent support for Python applications and integrates seamlessly with Pydantic models
+
+ 2. **OpenTelemetry-based** - Uses the OpenTelemetry standard for collecting telemetry data, making it compatible with other tools and ensuring vendor neutrality
+
+ 3. **Structured Logging** - Emphasizes structured, queryable logs rather than plain text logs
+
+ 4. **Performance Monitoring** - Tracks application performance, slow queries, and bottlenecks
+
+ 5. **Distributed Tracing** - Helps track requests across multiple services in distributed systems
+
+ 6. **Real-time Analysis** - Provides tools to query and analyze logs and traces in real-time
+
+ 7. **Developer-friendly** - Designed with a focus on developer experience, making it easy to integrate and use
+
+ Logfire aims to be a modern alternative to traditional logging and monitoring solutions, with a particular focus on Python applications and frameworks like FastAPI, Django, and Flask.
+ type: text
+ id: msg_01EgHi65sXgVr95fHL6u6wEp
+ model: claude-sonnet-4-5-20250929
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 600
+ output_tokens: 286
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_auto-lossless-MODEL_UNSUPPORTED].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_auto-lossless-MODEL_UNSUPPORTED].yaml
new file mode 100644
index 0000000000..ad14f48f29
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_auto-lossless-MODEL_UNSUPPORTED].yaml
@@ -0,0 +1,88 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '438'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-0
+ stream: false
+ tool_choice:
+ type: auto
+ tools:
+ - description: Simple schema with no validation constraints - fully strict-compatible.
+ input_schema:
+ additionalProperties: false
+ properties:
+ location:
+ type: string
+ required:
+ - location
+ type: object
+ name: pydantic_questions_tool
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '1275'
+ content-type:
+ - application/json
+ retry-after:
+ - '7'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: |-
+ I don't have a relevant tool available to look up information about "Logfire" for you. Based on my knowledge, Logfire could refer to several things:
+
+ 1. **Pydantic Logfire** - A relatively new observability platform/tool developed by the Pydantic team for monitoring and debugging Python applications, particularly those using Pydantic models and FastAPI.
+
+ 2. **Other logging/monitoring tools** - There may be other software tools or services with similar names in the logging, monitoring, or analytics space.
+
+ Could you provide more context about which Logfire you're asking about? For example:
+ - Are you asking about a specific software tool or service?
+ - Is this related to Python development, logging, monitoring, or another domain?
+ - Do you have a particular use case or context in mind?
+
+ This would help me give you a more targeted and useful answer.
+ type: text
+ id: msg_01X4JALR6oh523WrFjHCbcLz
+ model: claude-sonnet-4-20250514
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 396
+ output_tokens: 195
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_auto-lossy-MODEL_UNSUPPORTED].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_auto-lossy-MODEL_UNSUPPORTED].yaml
new file mode 100644
index 0000000000..da35c96642
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_auto-lossy-MODEL_UNSUPPORTED].yaml
@@ -0,0 +1,109 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '449'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-0
+ stream: false
+ tool_choice:
+ type: auto
+ tools:
+ - description: Schema with validation constraints that get dropped - not strict-compatible.
+ input_schema:
+ properties:
+ username:
+ minLength: 3
+ pattern: ^[a-z]+$
+ type: string
+ required:
+ - username
+ type: object
+ name: pydantic_questions_tool
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '2060'
+ content-type:
+ - application/json
+ retry-after:
+ - '1'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: |-
+ Logfire is a modern observability platform developed by Pydantic (the team behind the popular Pydantic Python library). Here are the key aspects of Logfire:
+
+ ## What is Logfire?
+
+ **Logfire** is an observability and monitoring platform designed to help developers track, debug, and optimize their applications. It provides comprehensive logging, metrics, and tracing capabilities in a unified interface.
+
+ ## Key Features:
+
+ 1. **Structured Logging** - Captures detailed, searchable logs with rich metadata
+ 2. **Distributed Tracing** - Tracks requests across microservices and complex systems
+ 3. **Metrics & Analytics** - Performance monitoring and custom metrics
+ 4. **Real-time Monitoring** - Live dashboards and alerting
+ 5. **Python-first Design** - Optimized for Python applications with deep integration
+
+ ## Integration Benefits:
+
+ - **Pydantic Integration** - Seamless validation and serialization of log data
+ - **FastAPI Support** - Built-in instrumentation for FastAPI applications
+ - **OpenTelemetry Compatible** - Works with standard observability protocols
+ - **Developer Experience** - Clean APIs and intuitive dashboard interface
+
+ ## Use Cases:
+
+ - Debugging production applications
+ - Performance optimization
+ - API monitoring
+ - Error tracking and alerting
+ - Understanding user behavior and system performance
+
+ Logfire aims to provide a more developer-friendly alternative to traditional observability tools, with particular strength in Python ecosystems. It's designed to be easy to set up while providing powerful insights into application behavior.
+
+ Would you like me to elaborate on any specific aspect of Logfire?
+ type: text
+ id: msg_01E1BppEW5zm8JvM8us83rwk
+ model: claude-sonnet-4-20250514
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 409
+ output_tokens: 355
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_false-lossless].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_false-lossless].yaml
new file mode 100644
index 0000000000..5d062726a4
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_false-lossless].yaml
@@ -0,0 +1,96 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '409'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-0
+ stream: false
+ tool_choice:
+ type: auto
+ tools:
+ - description: Simple schema with no validation constraints - fully strict-compatible.
+ input_schema:
+ properties:
+ location:
+ type: string
+ required:
+ - location
+ type: object
+ name: pydantic_questions_tool
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '1644'
+ content-type:
+ - application/json
+ retry-after:
+ - '57'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: |-
+ I don't have access to tools that can help me answer general questions about what Logfire is. However, I can tell you that Logfire is a Python observability platform developed by Pydantic. It's designed to help developers monitor, debug, and understand their Python applications through logging, metrics, and tracing.
+
+ Key features of Logfire include:
+
+ 1. **Easy integration** - Simple setup with minimal configuration
+ 2. **Structured logging** - Built-in support for structured, searchable logs
+ 3. **Performance monitoring** - Track application performance and identify bottlenecks
+ 4. **Real-time dashboards** - Visual insights into your application's behavior
+ 5. **Pydantic integration** - Seamless integration with Pydantic models and validation
+ 6. **OpenTelemetry compatible** - Built on industry standards for observability
+
+ Logfire is particularly useful for:
+ - Debugging production issues
+ - Monitoring application health
+ - Understanding user behavior
+ - Performance optimization
+ - Compliance and audit trails
+
+ It's designed to be developer-friendly while providing powerful observability capabilities for Python applications of all sizes.
+
+ Is there something specific about Logfire you'd like to know more about?
+ type: text
+ id: msg_01V2sbrdxQtt6zPB3LUJSuVP
+ model: claude-sonnet-4-20250514
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 389
+ output_tokens: 265
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_false-lossy].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_false-lossy].yaml
new file mode 100644
index 0000000000..a4228ab787
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_false-lossy].yaml
@@ -0,0 +1,96 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '449'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-0
+ stream: false
+ tool_choice:
+ type: auto
+ tools:
+ - description: Schema with validation constraints that get dropped - not strict-compatible.
+ input_schema:
+ properties:
+ username:
+ minLength: 3
+ pattern: ^[a-z]+$
+ type: string
+ required:
+ - username
+ type: object
+ name: pydantic_questions_tool
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '1766'
+ content-type:
+ - application/json
+ retry-after:
+ - '47'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: |-
+ Logfire is a Python observability platform developed by Pydantic. It's designed to help developers monitor, debug, and understand their Python applications through comprehensive logging, tracing, and monitoring capabilities.
+
+ Key features of Logfire include:
+
+ 1. **Structured Logging** - Provides rich, structured logging with automatic serialization of Python objects
+ 2. **Distributed Tracing** - Tracks requests across multiple services and components
+ 3. **Performance Monitoring** - Monitors application performance and identifies bottlenecks
+ 4. **Real-time Dashboard** - Offers a web-based interface for viewing logs and metrics
+ 5. **Python-first Design** - Built specifically for Python applications with deep integration
+ 6. **Pydantic Integration** - Seamless integration with Pydantic models for automatic validation and serialization
+
+ Logfire aims to be a modern alternative to traditional logging solutions, offering better developer experience and more powerful debugging capabilities for Python applications. It's particularly useful for:
+
+ - Web applications and APIs
+ - Microservices architectures
+ - Data processing pipelines
+ - Any Python application where observability is important
+
+ The platform provides both self-hosted and cloud-hosted options, making it accessible for different deployment scenarios and organizational needs.
+ type: text
+ id: msg_01QwbtcfbXnW5aY53Jd1e3z3
+ model: claude-sonnet-4-20250514
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 409
+ output_tokens: 271
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_true-lossless-MODEL_IGNORES].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_true-lossless-MODEL_IGNORES].yaml
new file mode 100644
index 0000000000..ceaf3f7f5a
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_true-lossless-MODEL_IGNORES].yaml
@@ -0,0 +1,87 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '438'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-0
+ stream: false
+ tool_choice:
+ type: auto
+ tools:
+ - description: Simple schema with no validation constraints - fully strict-compatible.
+ input_schema:
+ additionalProperties: false
+ properties:
+ location:
+ type: string
+ required:
+ - location
+ type: object
+ name: pydantic_questions_tool
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '1477'
+ content-type:
+ - application/json
+ retry-after:
+ - '21'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: |-
+ I don't have a relevant tool available to look up information about Logfire. Based on my general knowledge, Logfire is a Python observability platform developed by Pydantic. It's designed to help developers monitor, debug, and understand their Python applications by providing:
+
+ 1. **Structured logging** - Better organized and searchable logs
+ 2. **Performance monitoring** - Track application performance and identify bottlenecks
+ 3. **Error tracking** - Catch and analyze exceptions and errors
+ 4. **Distributed tracing** - Follow requests across multiple services
+ 5. **Integration with Pydantic** - Seamless integration with Pydantic models for validation and serialization
+
+ Logfire aims to provide comprehensive observability for Python applications with a focus on developer experience and ease of use. It's particularly well-suited for applications that already use Pydantic for data validation.
+
+ If you need more specific or up-to-date information about Logfire, I'd recommend checking the official Pydantic documentation or the Logfire project repository.
+ type: text
+ id: msg_01K64oYKRrYw9aGVHVAyfCEZ
+ model: claude-sonnet-4-20250514
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 396
+ output_tokens: 229
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_true-lossy-MODEL_IGNORES].yaml b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_true-lossy-MODEL_IGNORES].yaml
new file mode 100644
index 0000000000..af670aeed5
--- /dev/null
+++ b/tests/models/anthropic/cassettes/test_output/test_combinations_live_api[unsupported_tool-strict_true-lossy-MODEL_IGNORES].yaml
@@ -0,0 +1,104 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '493'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: what is Logfire?
+ type: text
+ role: user
+ model: claude-sonnet-4-0
+ stream: false
+ tool_choice:
+ type: auto
+ tools:
+ - description: Schema with validation constraints that get dropped - not strict-compatible.
+ input_schema:
+ additionalProperties: false
+ properties:
+ username:
+ description: '{minLength: 3, pattern: ^[a-z]+$}'
+ type: string
+ required:
+ - username
+ type: object
+ name: pydantic_questions_tool
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '1939'
+ content-type:
+ - application/json
+ retry-after:
+ - '15'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: |-
+ Logfire is a Python observability platform developed by Pydantic. It's designed to help developers monitor, debug, and understand their Python applications through comprehensive logging, tracing, and metrics collection.
+
+ Key features of Logfire include:
+
+ 1. **Structured Logging**: Provides rich, structured logging capabilities that go beyond traditional text-based logs
+
+ 2. **Distributed Tracing**: Tracks requests across multiple services and components to help understand application flow and performance
+
+ 3. **Metrics Collection**: Gathers performance metrics and application statistics
+
+ 4. **Integration with Pydantic**: Built by the same team that creates Pydantic, so it has excellent integration with Pydantic models and validation
+
+ 5. **Developer Experience**: Focuses on providing a smooth developer experience with intuitive APIs and good documentation
+
+ 6. **Real-time Monitoring**: Offers real-time insights into application behavior and performance
+
+ 7. **OpenTelemetry Compatible**: Built on open standards, making it interoperable with other observability tools
+
+ Logfire is particularly useful for:
+ - Debugging complex applications
+ - Performance monitoring
+ - Understanding user behavior
+ - Tracking errors and exceptions
+ - Monitoring API performance
+ - Gaining insights into application usage patterns
+
+ It's designed to be easy to integrate into existing Python applications and provides both a hosted service and tools for self-hosting, making it accessible for different deployment scenarios.
+ type: text
+ id: msg_01Szkw6WkooAVpihiJFgG5nM
+ model: claude-sonnet-4-20250514
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 417
+ output_tokens: 304
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/anthropic/conftest.py b/tests/models/anthropic/conftest.py
new file mode 100644
index 0000000000..6edb7d19f2
--- /dev/null
+++ b/tests/models/anthropic/conftest.py
@@ -0,0 +1,95 @@
+"""Shared fixtures for Anthropic model tests."""
+
+from __future__ import annotations as _annotations
+
+from collections.abc import Callable
+from functools import cache
+
+import pytest
+
+from ...conftest import try_import
+from ..test_anthropic import MockAnthropic, completion_message
+
+with try_import() as imports_successful:
+ from anthropic import AsyncAnthropic
+ from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaToolUseBlock, BetaUsage
+ from pydantic import BaseModel
+
+ from pydantic_ai.models.anthropic import AnthropicModel
+ from pydantic_ai.providers.anthropic import AnthropicProvider
+
+AnthropicModelFactory = Callable[..., AnthropicModel]
+
+
+# Model factory fixture for live API tests
+@pytest.fixture
+def anthropic_model(anthropic_api_key: str) -> AnthropicModelFactory:
+ """Factory to create Anthropic models with custom configuration."""
+
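+    # functools.cache memoizes on (model_name, api_key), so repeated calls within a test reuse the same model instance.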
+ @cache
+ def _create_model(
+ model_name: str,
+ api_key: str | None = None,
+ ) -> AnthropicModel:
+ """Create an AnthropicModel with the specified configuration.
+
+ Args:
+ model_name: Model name like 'claude-sonnet-4-5'
+ api_key: Optional API key, defaults to the fixture's anthropic_api_key
+ """
+ return AnthropicModel(
+ model_name,
+ provider=AnthropicProvider(api_key=api_key or anthropic_api_key),
+ )
+
+ return _create_model
+
+
+# Mock model fixtures for unit tests
+@pytest.fixture
+def mock_sonnet_4_5(allow_model_requests: None) -> tuple[AnthropicModel, AsyncAnthropic]:
+ """Mock claude-sonnet-4-5 model for unit tests."""
+ c = completion_message(
+ [BetaTextBlock(text='{"city": "Mexico City", "country": "Mexico"}', type='text')],
+ BetaUsage(input_tokens=5, output_tokens=10),
+ )
+ mock_client = MockAnthropic.create_mock(c)
+ model = AnthropicModel('claude-sonnet-4-5', provider=AnthropicProvider(anthropic_client=mock_client))
+ return model, mock_client
+
+
+# Schema fixtures
+@pytest.fixture
+def city_location_schema() -> type[BaseModel]:
+ """Standard CityLocation schema for testing."""
+
+ class CityLocation(BaseModel):
+ """A city and its country."""
+
+ city: str
+ country: str
+
+ return CityLocation
+
+
+# Mock response fixtures
+@pytest.fixture
+def weather_tool_responses() -> list[BetaMessage]:
+ """Standard mock responses for weather tool tests."""
+ return [
+ completion_message(
+ [
+ BetaToolUseBlock(
+ id='tool_123',
+ name='get_weather',
+ input={'location': 'Paris'},
+ type='tool_use',
+ )
+ ],
+ BetaUsage(input_tokens=5, output_tokens=10),
+ ),
+ completion_message(
+ [BetaTextBlock(text='The weather in Paris is sunny.', type='text')],
+ BetaUsage(input_tokens=3, output_tokens=5),
+ ),
+ ]
diff --git a/tests/models/anthropic/test_output.py b/tests/models/anthropic/test_output.py
new file mode 100644
index 0000000000..74306d2f89
--- /dev/null
+++ b/tests/models/anthropic/test_output.py
@@ -0,0 +1,660 @@
+"""Tests for Anthropic native JSON schema output and strict tool support.
+
+This module tests the implementation of Anthropic's structured outputs feature,
+including native JSON schema output for final responses and strict tool calling.
+
+Test organization:
+1. Strict Tools - Model Support
+2. Strict Tools - Schema Compatibility
+3. Native Output - Model Support
+4. Auto Mode Selection
+5. Beta Header Management
+6. Comprehensive Parametrized Tests - All Combinations (24 test cases)
+"""
+
+from __future__ import annotations as _annotations
+
+from collections.abc import Callable
+from dataclasses import dataclass
+from typing import Annotated, Literal
+
+import httpx
+import pytest
+from inline_snapshot import snapshot
+from pydantic import BaseModel, Field
+from typing_extensions import assert_never
+
+from pydantic_ai import Agent, Tool
+from pydantic_ai.exceptions import UserError
+from pydantic_ai.output import NativeOutput
+
+from ...conftest import try_import
+from ..test_anthropic import MockAnthropic, get_mock_chat_completion_kwargs
+
+with try_import() as imports_successful:
+ from anthropic import AsyncAnthropic, omit as OMIT
+ from anthropic.types.beta import BetaMessage, BetaTextBlock, BetaUsage
+
+ from pydantic_ai.models.anthropic import AnthropicModel, AnthropicModelSettings
+ from pydantic_ai.providers.anthropic import AnthropicProvider
+
+from ..test_anthropic import completion_message
+
+pytestmark = [
+ pytest.mark.skipif(not imports_successful(), reason='anthropic not installed'),
+ pytest.mark.anyio,
+ pytest.mark.vcr,
+]
+
+
+# =============================================================================
+# STRICT TOOLS - Model Support
+# =============================================================================
+
+
+def test_strict_tools_supported_model_auto_enabled(
+ allow_model_requests: None, weather_tool_responses: list[BetaMessage]
+):
+ """sonnet-4-5: strict=None + compatible schema → auto strict=True + beta header."""
+ mock_client = MockAnthropic.create_mock(weather_tool_responses)
+ model = AnthropicModel('claude-sonnet-4-5', provider=AnthropicProvider(anthropic_client=mock_client))
+ agent = Agent(model)
+
+ @agent.tool_plain
+ def get_weather(location: str) -> str:
+ return f'Weather in {location}'
+
+ agent.run_sync('What is the weather in Paris?')
+
+ completion_kwargs = get_mock_chat_completion_kwargs(mock_client)[0]
+ tools = completion_kwargs['tools']
+ betas = completion_kwargs['betas']
+
+ assert tools == snapshot(
+ [
+ {
+ 'name': 'get_weather',
+ 'description': '',
+ 'input_schema': {
+ 'type': 'object',
+ 'properties': {'location': {'type': 'string'}},
+ 'additionalProperties': False,
+ 'required': ['location'],
+ },
+                # strict is enabled automatically because the model supports it and the schema is strict-compatible
+ 'strict': True,
+ }
+ ]
+ )
+ assert betas == snapshot(['structured-outputs-2025-11-13'])
+
+
+def test_strict_tools_supported_model_explicit_false(
+ allow_model_requests: None, weather_tool_responses: list[BetaMessage]
+):
+ """sonnet-4-5: strict=False → no strict field, no beta header."""
+ mock_client = MockAnthropic.create_mock(weather_tool_responses)
+ model = AnthropicModel('claude-sonnet-4-5', provider=AnthropicProvider(anthropic_client=mock_client))
+ agent = Agent(model)
+
+ @agent.tool_plain(strict=False)
+ def get_weather(location: str) -> str:
+ return f'Weather in {location}'
+
+ agent.run_sync('What is the weather in Paris?')
+
+ completion_kwargs = get_mock_chat_completion_kwargs(mock_client)[0]
+ tools = completion_kwargs['tools']
+ betas = completion_kwargs.get('betas')
+
+ assert 'strict' not in tools[0]
+ assert tools[0]['input_schema']['additionalProperties'] is False
+ assert betas is OMIT
+
+
+def test_strict_tools_unsupported_model_no_strict_sent(
+ allow_model_requests: None, weather_tool_responses: list[BetaMessage]
+):
+ """sonnet-4-0: strict=None → no strict field, no beta header (model doesn't support strict)."""
+ mock_client = MockAnthropic.create_mock(weather_tool_responses)
+ model = AnthropicModel('claude-sonnet-4-0', provider=AnthropicProvider(anthropic_client=mock_client))
+ agent = Agent(model)
+
+ @agent.tool_plain
+ def get_weather(location: str) -> str:
+ return f'Weather in {location}'
+
+ agent.run_sync('What is the weather in Paris?')
+
+ completion_kwargs = get_mock_chat_completion_kwargs(mock_client)[0]
+ tools = completion_kwargs['tools']
+ betas = completion_kwargs.get('betas')
+
+ # sonnet-4-0 doesn't support strict tools, so no strict field or beta header
+ assert 'strict' not in tools[0]
+ assert betas is OMIT
+
+
+# =============================================================================
+# STRICT TOOLS - Schema Compatibility
+# =============================================================================
+
+
+def test_strict_tools_incompatible_schema_not_auto_enabled(allow_model_requests: None):
+ """sonnet-4-5: strict=None + lossy schema → no strict field, no beta header."""
+ mock_client = MockAnthropic.create_mock(
+ completion_message([BetaTextBlock(text='Sure', type='text')], BetaUsage(input_tokens=5, output_tokens=2))
+ )
+ model = AnthropicModel('claude-sonnet-4-5', provider=AnthropicProvider(anthropic_client=mock_client))
+ agent = Agent(model)
+
+ @agent.tool_plain
+ def constrained_tool(username: Annotated[str, Field(min_length=3)]) -> str: # pragma: no cover
+ return username
+
+ agent.run_sync('Test')
+
+ completion_kwargs = get_mock_chat_completion_kwargs(mock_client)[0]
+ tools = completion_kwargs['tools']
+ betas = completion_kwargs.get('betas')
+
+ # Lossy schema: strict is not auto-enabled, so no strict field
+ assert 'strict' not in tools[0]
+ # Schema still has the constraint (not removed)
+ assert tools[0]['input_schema']['properties']['username']['minLength'] == 3
+ assert betas is OMIT
+
+
+# =============================================================================
+# NATIVE OUTPUT - Model Support
+# =============================================================================
+
+
+def test_native_output_supported_model(
+ allow_model_requests: None,
+ mock_sonnet_4_5: tuple[AnthropicModel, AsyncAnthropic],
+ city_location_schema: type[BaseModel],
+):
+ """sonnet-4-5: NativeOutput → strict=True + beta header + output_format."""
+ model, mock_client = mock_sonnet_4_5
+ agent = Agent(model, output_type=NativeOutput(city_location_schema))
+
+ agent.run_sync('What is the capital of France?')
+
+ completion_kwargs = get_mock_chat_completion_kwargs(mock_client)[-1]
+ output_format = completion_kwargs['output_format']
+ betas = completion_kwargs['betas']
+
+ assert output_format['type'] == 'json_schema'
+ assert output_format['schema']['type'] == 'object'
+ assert betas == snapshot(['structured-outputs-2025-11-13'])
+
+
+def test_native_output_unsupported_model_raises_error(
+ allow_model_requests: None, city_location_schema: type[BaseModel]
+):
+ """sonnet-4-0: NativeOutput → raises UserError."""
+ mock_client = MockAnthropic.create_mock(
+ completion_message([BetaTextBlock(text='test', type='text')], BetaUsage(input_tokens=5, output_tokens=2))
+ )
+ model = AnthropicModel('claude-sonnet-4-0', provider=AnthropicProvider(anthropic_client=mock_client))
+ agent = Agent(model, output_type=NativeOutput(city_location_schema))
+
+ with pytest.raises(UserError, match='Native structured output is not supported by this model'):
+ agent.run_sync('What is the capital of France?')
+
+
+# =============================================================================
+# AUTO MODE Selection
+# =============================================================================
+
+
+def test_auto_mode_model_profile_check(allow_model_requests: None):
+ """Verify profile.supports_json_schema_output is set correctly."""
+ mock_client = MockAnthropic.create_mock(
+ completion_message([BetaTextBlock(text='test', type='text')], BetaUsage(input_tokens=5, output_tokens=2))
+ )
+
+ sonnet_4_5 = AnthropicModel('claude-sonnet-4-5', provider=AnthropicProvider(anthropic_client=mock_client))
+ assert sonnet_4_5.profile.supports_json_schema_output is True
+
+ sonnet_4_0 = AnthropicModel('claude-sonnet-4-0', provider=AnthropicProvider(anthropic_client=mock_client))
+ assert sonnet_4_0.profile.supports_json_schema_output is False
+
+
+# =============================================================================
+# BETA HEADER Management
+# =============================================================================
+
+
+def test_beta_header_merge_custom_headers(
+ allow_model_requests: None,
+ mock_sonnet_4_5: tuple[AnthropicModel, AsyncAnthropic],
+ city_location_schema: type[BaseModel],
+):
+ """Custom beta headers merge with structured-outputs beta."""
+ model, mock_client = mock_sonnet_4_5
+
+ agent = Agent(
+ model,
+ output_type=NativeOutput(city_location_schema),
+ model_settings=AnthropicModelSettings(extra_headers={'anthropic-beta': 'custom-feature-1, custom-feature-2'}),
+ )
+ agent.run_sync('What is the capital of France?')
+
+ completion_kwargs = get_mock_chat_completion_kwargs(mock_client)[-1]
+ betas = completion_kwargs['betas']
+
+ assert betas == snapshot(['custom-feature-1', 'custom-feature-2', 'structured-outputs-2025-11-13'])
+
+
+# =============================================================================
+# COMPREHENSIVE PARAMETRIZED TESTS - All Combinations
+# =============================================================================
+
+
+class LosslessSchema(BaseModel):
+ """Simple schema with no validation constraints - fully strict-compatible."""
+
+ location: str
+
+
+class LossySchema(BaseModel):
+ """Schema with validation constraints that get dropped - not strict-compatible."""
+
+ username: Annotated[str, Field(min_length=3, pattern=r'^[a-z]+$')]
+
+
+@dataclass
+class StrictTestCase:
+ """Defines a test case for strict mode behavior across models, schemas, and modes."""
+
+ name: str
+ model_name: str
+ strict: bool | None
+ schema_type: Literal['lossless', 'lossy']
+ mode: Literal['tool', 'native']
+ expect_strict_field: bool | None # None means expect error
+ expect_beta_header: bool | None # None means expect error
+ expect_error: type[Exception] | None = None
+
+
+# =============================================================================
+# TOOL CASES - Supported Model (claude-sonnet-4-5)
+# =============================================================================
+
+SUPPORTED_TOOL_STRICT_TRUE = [
+ StrictTestCase(
+ name='supported_tool-strict_true-lossless',
+ model_name='claude-sonnet-4-5',
+ strict=True,
+ schema_type='lossless',
+ mode='tool',
+ expect_strict_field=True,
+ expect_beta_header=True,
+ ),
+ StrictTestCase(
+ name='supported_tool-strict_true-lossy',
+ model_name='claude-sonnet-4-5',
+ strict=True,
+ schema_type='lossy',
+ mode='tool',
+ expect_strict_field=True,
+ expect_beta_header=True,
+ ),
+]
+
+SUPPORTED_TOOL_STRICT_NONE = [
+ StrictTestCase(
+ name='supported_tool-strict_auto-lossless-AUTO_ENABLED',
+ model_name='claude-sonnet-4-5',
+ strict=None,
+ schema_type='lossless',
+ mode='tool',
+ expect_strict_field=True,
+ expect_beta_header=True,
+ ),
+ StrictTestCase(
+ name='supported_tool-strict_auto-lossy-NOT_AUTO_ENABLED',
+ model_name='claude-sonnet-4-5',
+ strict=None,
+ schema_type='lossy',
+ mode='tool',
+ expect_strict_field=False,
+ expect_beta_header=False,
+ ),
+]
+
+SUPPORTED_TOOL_STRICT_FALSE = [
+ StrictTestCase(
+ name='supported_tool-strict_false-lossless',
+ model_name='claude-sonnet-4-5',
+ strict=False,
+ schema_type='lossless',
+ mode='tool',
+ expect_strict_field=False,
+ expect_beta_header=False,
+ ),
+ StrictTestCase(
+ name='supported_tool-strict_false-lossy',
+ model_name='claude-sonnet-4-5',
+ strict=False,
+ schema_type='lossy',
+ mode='tool',
+ expect_strict_field=False,
+ expect_beta_header=False,
+ ),
+]
+
+
+# =============================================================================
+# TOOL CASES - Unsupported Model (claude-sonnet-4-0)
+# =============================================================================
+
+UNSUPPORTED_TOOL_STRICT_TRUE = [
+ StrictTestCase(
+ name='unsupported_tool-strict_true-lossless-MODEL_IGNORES',
+ model_name='claude-sonnet-4-0',
+ strict=True,
+ schema_type='lossless',
+ mode='tool',
+ expect_strict_field=False,
+ expect_beta_header=False,
+ ),
+ StrictTestCase(
+ name='unsupported_tool-strict_true-lossy-MODEL_IGNORES',
+ model_name='claude-sonnet-4-0',
+ strict=True,
+ schema_type='lossy',
+ mode='tool',
+ expect_strict_field=False,
+ expect_beta_header=False,
+ ),
+]
+
+UNSUPPORTED_TOOL_STRICT_NONE = [
+ StrictTestCase(
+ name='unsupported_tool-strict_auto-lossless-MODEL_UNSUPPORTED',
+ model_name='claude-sonnet-4-0',
+ strict=None,
+ schema_type='lossless',
+ mode='tool',
+ expect_strict_field=False,
+ expect_beta_header=False,
+ ),
+ StrictTestCase(
+ name='unsupported_tool-strict_auto-lossy-MODEL_UNSUPPORTED',
+ model_name='claude-sonnet-4-0',
+ strict=None,
+ schema_type='lossy',
+ mode='tool',
+ expect_strict_field=False,
+ expect_beta_header=False,
+ ),
+]
+
+UNSUPPORTED_TOOL_STRICT_FALSE = [
+ StrictTestCase(
+ name='unsupported_tool-strict_false-lossless',
+ model_name='claude-sonnet-4-0',
+ strict=False,
+ schema_type='lossless',
+ mode='tool',
+ expect_strict_field=False,
+ expect_beta_header=False,
+ ),
+ StrictTestCase(
+ name='unsupported_tool-strict_false-lossy',
+ model_name='claude-sonnet-4-0',
+ strict=False,
+ schema_type='lossy',
+ mode='tool',
+ expect_strict_field=False,
+ expect_beta_header=False,
+ ),
+]
+
+
+# =============================================================================
+# NATIVE OUTPUT CASES - Supported Model (claude-sonnet-4-5)
+# =============================================================================
+
+SUPPORTED_NATIVE_STRICT_TRUE = [
+ StrictTestCase(
+ name='supported_native-strict_true-lossless',
+ model_name='claude-sonnet-4-5',
+ strict=True,
+ schema_type='lossless',
+ mode='native',
+ expect_strict_field=True,
+ expect_beta_header=True,
+ ),
+ StrictTestCase(
+ name='supported_native-strict_true-lossy',
+ model_name='claude-sonnet-4-5',
+ strict=True,
+ schema_type='lossy',
+ mode='native',
+ expect_strict_field=True,
+ expect_beta_header=True,
+ ),
+]
+
+SUPPORTED_NATIVE_STRICT_NONE = [
+ StrictTestCase(
+ name='supported_native-strict_auto-lossless-FORCES_TRUE',
+ model_name='claude-sonnet-4-5',
+ strict=None,
+ schema_type='lossless',
+ mode='native',
+ expect_strict_field=True,
+ expect_beta_header=True,
+ ),
+ StrictTestCase(
+ name='supported_native-strict_auto-lossy-FORCES_TRUE',
+ model_name='claude-sonnet-4-5',
+ strict=None,
+ schema_type='lossy',
+ mode='native',
+ expect_strict_field=True,
+ expect_beta_header=True,
+ ),
+]
+
+SUPPORTED_NATIVE_STRICT_FALSE = [
+ StrictTestCase(
+ name='supported_native-strict_false-lossless-FORCES_TRUE',
+ model_name='claude-sonnet-4-5',
+ strict=False,
+ schema_type='lossless',
+ mode='native',
+ expect_strict_field=True,
+ expect_beta_header=True,
+ ),
+ StrictTestCase(
+ name='supported_native-strict_false-lossy-FORCES_TRUE',
+ model_name='claude-sonnet-4-5',
+ strict=False,
+ schema_type='lossy',
+ mode='native',
+ expect_strict_field=True,
+ expect_beta_header=True,
+ ),
+]
+
+
+# =============================================================================
+# NATIVE OUTPUT CASES - Unsupported Model (claude-sonnet-4-0)
+# =============================================================================
+
+UNSUPPORTED_NATIVE_ALL = [
+ StrictTestCase(
+ name='unsupported_native-strict_true-lossless-RAISES',
+ model_name='claude-sonnet-4-0',
+ strict=True,
+ schema_type='lossless',
+ mode='native',
+ expect_strict_field=None,
+ expect_beta_header=None,
+ expect_error=UserError,
+ ),
+ StrictTestCase(
+ name='unsupported_native-strict_true-lossy-RAISES',
+ model_name='claude-sonnet-4-0',
+ strict=True,
+ schema_type='lossy',
+ mode='native',
+ expect_strict_field=None,
+ expect_beta_header=None,
+ expect_error=UserError,
+ ),
+ StrictTestCase(
+ name='unsupported_native-strict_auto-lossless-RAISES',
+ model_name='claude-sonnet-4-0',
+ strict=None,
+ schema_type='lossless',
+ mode='native',
+ expect_strict_field=None,
+ expect_beta_header=None,
+ expect_error=UserError,
+ ),
+ StrictTestCase(
+ name='unsupported_native-strict_auto-lossy-RAISES',
+ model_name='claude-sonnet-4-0',
+ strict=None,
+ schema_type='lossy',
+ mode='native',
+ expect_strict_field=None,
+ expect_beta_header=None,
+ expect_error=UserError,
+ ),
+ StrictTestCase(
+ name='unsupported_native-strict_false-lossless-RAISES',
+ model_name='claude-sonnet-4-0',
+ strict=False,
+ schema_type='lossless',
+ mode='native',
+ expect_strict_field=None,
+ expect_beta_header=None,
+ expect_error=UserError,
+ ),
+ StrictTestCase(
+ name='unsupported_native-strict_false-lossy-RAISES',
+ model_name='claude-sonnet-4-0',
+ strict=False,
+ schema_type='lossy',
+ mode='native',
+ expect_strict_field=None,
+ expect_beta_header=None,
+ expect_error=UserError,
+ ),
+]
+
+
+# =============================================================================
+# Combine All Cases
+# =============================================================================
+
+ALL_CASES = (
+ SUPPORTED_TOOL_STRICT_TRUE
+ + SUPPORTED_TOOL_STRICT_NONE
+ + SUPPORTED_TOOL_STRICT_FALSE
+ + UNSUPPORTED_TOOL_STRICT_TRUE
+ + UNSUPPORTED_TOOL_STRICT_NONE
+ + UNSUPPORTED_TOOL_STRICT_FALSE
+ + SUPPORTED_NATIVE_STRICT_TRUE
+ + SUPPORTED_NATIVE_STRICT_NONE
+ + SUPPORTED_NATIVE_STRICT_FALSE
+ + UNSUPPORTED_NATIVE_ALL
+)
+
+
+# =============================================================================
+# Parametrized Test
+# =============================================================================
+
+AnthropicModelFactory = Callable[..., AnthropicModel]
+
+
+def create_header_verification_hook(case: StrictTestCase):
+ """Create an httpx event hook to verify request headers.
+
+ NOTE: the vcr config doesn't record anthropic-beta headers.
+ This hook allows us to verify them in live API tests.
+
+    TODO: remove once the structured outputs feature is generally available and no longer gated behind a beta header.
+ """
+
+ async def verify_headers(request: httpx.Request):
+ # Only verify for messages endpoint (the actual API calls)
+ if '/messages' in str(request.url): # pragma: no branch
+ beta_header = request.headers.get('anthropic-beta', '')
+
+ if case.expect_beta_header:
+ assert 'structured-outputs-2025-11-13' in beta_header, (
+ f'Expected beta header for {case.name}, got: {beta_header}'
+ )
+ else:
+ assert 'structured-outputs-2025-11-13' not in beta_header, (
+ f'Did not expect beta header for {case.name}, got: {beta_header}'
+ )
+
+ return verify_headers
+
+
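+# NOTE: each case's `name` doubles as the pytest param id (ids=lambda c: c.name), which also names the
+# VCR cassette, e.g. cassettes/test_output/test_combinations_live_api[supported_tool-strict_true-lossy].yaml.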
+@pytest.mark.parametrize('case', ALL_CASES, ids=lambda c: c.name)
+@pytest.mark.vcr(record_mode='new_episodes') # Allow recording new API interactions
+def test_combinations_live_api(
+ case: StrictTestCase,
+ allow_model_requests: None,
+    anthropic_model: AnthropicModelFactory,
+) -> None:
+ """Test strict mode across all combinations of models, schemas, and output modes with live API."""
+ # live API model factory
+ model = anthropic_model(case.model_name)
+
+ # Add httpx event hook to verify headers on requests
+ if case.expect_beta_header is not None:
+ hook = create_header_verification_hook(case)
+ model.client._client.event_hooks['request'].append(hook) # pyright: ignore[reportPrivateUsage]
+
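+    # Sanity check: only claude-sonnet-4-5 advertises JSON schema output support in its profile.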
+ assert model.profile.supports_json_schema_output == (case.model_name == 'claude-sonnet-4-5')
+
+ if case.mode == 'tool':
+ if case.schema_type == 'lossless':
+
+ def pydantic_questions_tool( # pyright: ignore[reportRedeclaration]
+ question: LosslessSchema,
+ ) -> str: # pragma: no cover
+ return 'Ask Samuel'
+ else: # lossy
+
+ def pydantic_questions_tool(question: LossySchema) -> str: # pragma: no cover
+ return 'Ask Samuel'
+
+ agent = Agent(model, tools=[Tool(pydantic_questions_tool, strict=case.strict)])
+
+ elif case.mode == 'native':
+ output_schema = LosslessSchema if case.schema_type == 'lossless' else LossySchema
+        # We wrap the schema in `NativeOutput` here, but models that support it select the 'native' output mode automatically even without the wrapper.
+ output_type = (
+ NativeOutput(output_schema, strict=case.strict) if case.strict is not None else NativeOutput(output_schema)
+ )
+ agent = Agent(model, output_type=output_type)
+ else:
+ assert_never(case.mode)
+
+ if case.expect_error:
+ with pytest.raises(case.expect_error, match='Native structured output is not supported'):
+ agent.run_sync('what is Pydantic?')
+ else:
+        # The request will include the strict field and beta header as configured
+ result = agent.run_sync('what is Logfire?')
+
+ # Verify we got a response
+ assert result is not None, f'Expected response for {case.name}'
+
+ # For native output, verify we got structured output
+ if case.mode == 'native':
+ assert hasattr(result, 'output'), f'Expected structured output for {case.name}'
+ assert result.output is not None, f'Expected non-None output for {case.name}'
diff --git a/tests/models/cassettes/test_anthropic/test_anthropic_mixed_strict_tool_run.yaml b/tests/models/cassettes/test_anthropic/test_anthropic_mixed_strict_tool_run.yaml
new file mode 100644
index 0000000000..676818f8fc
--- /dev/null
+++ b/tests/models/cassettes/test_anthropic/test_anthropic_mixed_strict_tool_run.yaml
@@ -0,0 +1,296 @@
+interactions:
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '649'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: 'Use the registered tools and respond exactly as `Capital: `.'
+ type: text
+ role: user
+ model: claude-sonnet-4-5
+ stream: false
+ system: Always call `country_source` first, then call `capital_lookup` with that result before replying.
+ tool_choice:
+ type: auto
+ tools:
+ - description: ''
+ input_schema:
+ additionalProperties: false
+ properties: {}
+ type: object
+ name: country_source
+ strict: true
+ - description: ''
+ input_schema:
+ additionalProperties: false
+ properties:
+ country:
+ type: string
+ required:
+ - country
+ type: object
+ name: capital_lookup
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '562'
+ content-type:
+ - application/json
+ retry-after:
+ - '4'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: I'll help you find the capital city using the available tools.
+ type: text
+ - id: toolu_01Ttepb9joVoQFHP568v7UAL
+ input: {}
+ name: country_source
+ type: tool_use
+ id: msg_01CTV3rhAAYCrzRGTEoJbJt7
+ model: claude-sonnet-4-5-20250929
+ role: assistant
+ stop_reason: tool_use
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 628
+ output_tokens: 50
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '996'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: 'Use the registered tools and respond exactly as `Capital: `.'
+ type: text
+ role: user
+ - content:
+ - text: I'll help you find the capital city using the available tools.
+ type: text
+ - id: toolu_01Ttepb9joVoQFHP568v7UAL
+ input: {}
+ name: country_source
+ type: tool_use
+ role: assistant
+ - content:
+ - content: Japan
+ is_error: false
+ tool_use_id: toolu_01Ttepb9joVoQFHP568v7UAL
+ type: tool_result
+ role: user
+ model: claude-sonnet-4-5
+ stream: false
+ system: Always call `country_source` first, then call `capital_lookup` with that result before replying.
+ tool_choice:
+ type: auto
+ tools:
+ - description: ''
+ input_schema:
+ additionalProperties: false
+ properties: {}
+ type: object
+ name: country_source
+ strict: true
+ - description: ''
+ input_schema:
+ additionalProperties: false
+ properties:
+ country:
+ type: string
+ required:
+ - country
+ type: object
+ name: capital_lookup
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '491'
+ content-type:
+ - application/json
+ retry-after:
+ - '52'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - id: toolu_011j5uC2Tg3TZJo3nmLtJ8Mm
+ input:
+ country: Japan
+ name: capital_lookup
+ type: tool_use
+ id: msg_01KgnnRwGgZEK3kvEGM5nbW8
+ model: claude-sonnet-4-5-20250929
+ role: assistant
+ stop_reason: tool_use
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 691
+ output_tokens: 53
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+- request:
+ headers:
+ accept:
+ - application/json
+ accept-encoding:
+ - gzip, deflate
+ connection:
+ - keep-alive
+ content-length:
+ - '1272'
+ content-type:
+ - application/json
+ host:
+ - api.anthropic.com
+ method: POST
+ parsed_body:
+ max_tokens: 4096
+ messages:
+ - content:
+ - text: 'Use the registered tools and respond exactly as `Capital: `.'
+ type: text
+ role: user
+ - content:
+ - text: I'll help you find the capital city using the available tools.
+ type: text
+ - id: toolu_01Ttepb9joVoQFHP568v7UAL
+ input: {}
+ name: country_source
+ type: tool_use
+ role: assistant
+ - content:
+ - content: Japan
+ is_error: false
+ tool_use_id: toolu_01Ttepb9joVoQFHP568v7UAL
+ type: tool_result
+ role: user
+ - content:
+ - id: toolu_011j5uC2Tg3TZJo3nmLtJ8Mm
+ input:
+ country: Japan
+ name: capital_lookup
+ type: tool_use
+ role: assistant
+ - content:
+ - content: Tokyo
+ is_error: false
+ tool_use_id: toolu_011j5uC2Tg3TZJo3nmLtJ8Mm
+ type: tool_result
+ role: user
+ model: claude-sonnet-4-5
+ stream: false
+ system: Always call `country_source` first, then call `capital_lookup` with that result before replying.
+ tool_choice:
+ type: auto
+ tools:
+ - description: ''
+ input_schema:
+ additionalProperties: false
+ properties: {}
+ type: object
+ name: country_source
+ strict: true
+ - description: ''
+ input_schema:
+ additionalProperties: false
+ properties:
+ country:
+ type: string
+ required:
+ - country
+ type: object
+ name: capital_lookup
+ uri: https://api.anthropic.com/v1/messages?beta=true
+ response:
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '420'
+ content-type:
+ - application/json
+ retry-after:
+ - '36'
+ strict-transport-security:
+ - max-age=31536000; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ parsed_body:
+ content:
+ - text: 'Capital: Tokyo'
+ type: text
+ id: msg_0111CmwjQHh6LerTTnrW2GPi
+ model: claude-sonnet-4-5-20250929
+ role: assistant
+ stop_reason: end_turn
+ stop_sequence: null
+ type: message
+ usage:
+ cache_creation:
+ ephemeral_1h_input_tokens: 0
+ ephemeral_5m_input_tokens: 0
+ cache_creation_input_tokens: 0
+ cache_read_input_tokens: 0
+ input_tokens: 757
+ output_tokens: 6
+ service_tier: standard
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/tests/models/cassettes/test_anthropic/test_anthropic_output_tool_with_thinking.yaml b/tests/models/cassettes/test_anthropic/test_anthropic_output_tool_with_thinking.yaml
index 0ea57131be..6b85553d15 100644
--- a/tests/models/cassettes/test_anthropic/test_anthropic_output_tool_with_thinking.yaml
+++ b/tests/models/cassettes/test_anthropic/test_anthropic_output_tool_with_thinking.yaml
@@ -8,7 +8,7 @@ interactions:
connection:
- keep-alive
content-length:
- - '475'
+ - '358'
content-type:
- application/json
host:
@@ -21,15 +21,18 @@ interactions:
- text: What is 3 + 3?
type: text
role: user
- model: claude-sonnet-4-0
+ model: claude-sonnet-4-5
+ output_format:
+ schema:
+ additionalProperties: false
+ properties:
+ response:
+ type: integer
+ required:
+ - response
+ type: object
+ type: json_schema
stream: false
- system: |2
-
- Always respond with a JSON object that's compatible with this schema:
-
- {"properties": {"response": {"type": "integer"}}, "required": ["response"], "type": "object", "title": "int"}
-
- Don't include any text or Markdown fencing before or after.
thinking:
budget_tokens: 3000
type: enabled
@@ -39,27 +42,35 @@ interactions:
connection:
- keep-alive
content-length:
- - '1150'
+ - '1563'
content-type:
- application/json
retry-after:
- - '54'
+ - '48'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
transfer-encoding:
- chunked
parsed_body:
content:
- - signature: EuYCCkYICRgCKkBKb+DTJGUMVOQahj61RknYW0QUDawJfq0T0GPDVPY12LCBbS7YPklMKo29mW3gdTAfPBWgYGmOj51p1jkFst2/Egw0xpDI3vnsrcqx484aDB8G93CLqlAq112quyIwq1/wOAOxPiIRklQ/i2iN/UzmWwPrGHmSS+TAq7qh2VQdi32TUk2zVXlOmTJdOSquKs0BbVTmLPWPc7szqedimy5uTbErLKLALr6DH1RRXuvGeRNElsnJofVsDu48aqeZg36g3Pi9Hboj1oE/TpyclCbv9/CWrixeQ/L/GSggr3FxLJvDgpdtppZfRxWajS6DjTH0AOU2aEu1gvxGtrcIa8htRmo5ZwAxISkaiOAm1lY5pSMl31gRFwby3n/2Y32b3UbM4SSlidDCgOTrDtbJSuwygduhfu7OdPg/I737G+sLcB0RUq4rqnPQQ+T+NYuDHPOz5xyGooXi7UNygIrO2BgB
+ - signature: EpEECkYICRgCKkBevTTAKlgYyxh1U6olWdqLd5cV0KKViBdsaidSSqI55TeC59ngdwK+NwXALIJArf9ayGq7z2Y++6f9n4gS0MmhEgxbAJIseNnXz9uSgPsaDElQizdciEcDkG0xlCIwD9zwOZA4kqNsjKp8uNgHC97LxRmdo4tUu87KSvvHnR7x0eiGo01W6CV94W71xB6IKvgChpTBQeBGjGaP7mELZonHoh0LF4tQKNVL3LnRgL9sritYl7IxwJPvVWbojmSQEoGaFJtRJJPHSReFvYlD8HKABz24PSrRCboJYTA3O1/agsuHzIdZSCf3Nhd7ftnJ3fxx6wnBs79s9TL+dbgzkiJjXrb9ZnherjWdqFJ6aPTti26i88U2co/0Q+IKtUigzBaiAGABuc5LaIzNGqeg0yPQV4pbVjvjap5jRAbzwYmpxdZMrRwbSIQ6smjkYaRg2mxs0OxvhHoDKAuAyEplRHtIYTjnjUoogqaa4TttGX3vhLKJD1WTwcp6NJlZqr34SeW1PlfG0mmuPmG8N85zQfWXQpLVuUvxdrFYePMO7dqrYgthP69zuLLgI9jUn1TteF9mgbs8nMZ9oQ99R4v2qRf7P08KFb58xxqM+/Hu5c4CazeXJsw4kiBfdNcSL4zaCZEodgGH7Yp/9jNzRSOX0UsCrZ4SnmOr7wOS8usNYr+cWK/vVR3NRgzLYxgB
thinking: |-
- The user is asking me to calculate 3 + 3, which equals 6. They want me to respond with a JSON object that has a "response" field with an integer value. So I need to return:
+ The user is asking a simple math question: 3 + 3.
+
+ 3 + 3 = 6
+
+ I need to respond with valid JSON that conforms to the schema provided. The schema requires:
+ - An object with a property called "response"
+ - The "response" property must be an integer
+ - No additional properties are allowed
+ - The "response" property is required
- {"response": 6}
+ So I need to return: {"response": 6}
type: thinking
- - text: '{"response": 6}'
+ - text: '{"response":6}'
type: text
- id: msg_013vH3ViFyo8f85fA4HJuF8A
- model: claude-sonnet-4-20250514
+ id: msg_01AoK283YkWYce78oTzoF5Qz
+ model: claude-sonnet-4-5-20250929
role: assistant
stop_reason: end_turn
stop_sequence: null
@@ -70,8 +81,8 @@ interactions:
ephemeral_5m_input_tokens: 0
cache_creation_input_tokens: 0
cache_read_input_tokens: 0
- input_tokens: 106
- output_tokens: 71
+ input_tokens: 187
+ output_tokens: 113
service_tier: standard
status:
code: 200
diff --git a/tests/models/cassettes/test_anthropic/test_anthropic_server_tool_pass_history_to_another_provider.yaml b/tests/models/cassettes/test_anthropic/test_anthropic_server_tool_pass_history_to_another_provider.yaml
index c3e8ee864a..9fa601a8a5 100644
--- a/tests/models/cassettes/test_anthropic/test_anthropic_server_tool_pass_history_to_another_provider.yaml
+++ b/tests/models/cassettes/test_anthropic/test_anthropic_server_tool_pass_history_to_another_provider.yaml
@@ -8,7 +8,7 @@ interactions:
connection:
- keep-alive
content-length:
- - '312'
+ - '321'
content-type:
- application/json
host:
@@ -28,6 +28,7 @@ interactions:
tools:
- allowed_domains: null
blocked_domains: null
+ max_uses: null
name: web_search
type: web_search_20250305
user_location: null
@@ -37,112 +38,20 @@ interactions:
connection:
- keep-alive
content-length:
- - '22237'
+ - '435'
content-type:
- application/json
+ retry-after:
+ - '58'
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
transfer-encoding:
- chunked
parsed_body:
content:
- - text: Let me search to find today's date.
+ - text: Today is November 19, 2025.
type: text
- - id: srvtoolu_01XS7jhb5cuvySCwMkqzaCZs
- input:
- query: today's date august 14 2025
- name: web_search
- type: server_tool_use
- - content:
- - encrypted_content: Eu8CCioIBhgCIiQ0NGFlNjc2Yy05NThmLTRkNjgtOTEwOC1lYWU5ZGU3YjM2NmISDDVX8QvgB5nuG4dIXxoMbQcR0MrfJ0qmFbMeIjDNIUWprY+mO5ziudCAEpog8aNm02OzWCrGrqYYDkL9NN3PyHyObunBTzKvqz2jwfMq8gEyjH27jH4q8hyFW+l47E7H2nNSm1M2USZ9KzD8SbNx2FdhFWL5vBvfbJZEoKAFbyf5h5IeVHf5IgevP9teYp9bic8lJv+spMomtV3MpbUpUnuq20quS6orYVTUWXANjOZG71uE0nZ+FrdgQHGZwoaW1HIPPNKXyEn8BOfndcpI1T56sCO/hlpDi5PFMLubERAA4kTKfBLV83Tk55C5cWU359szPzjghomr3wnkWK4vzsuVO5NLBhQ2W9OeDXj8lOoSeoSe3I2i2O1HTOd6YFZ94hmEUgfSepW61c/JMKvHe7LmA/i7J5jrZRd00Im3CwAOQhgD
- page_age: null
- title: August 14, 2025 Calendar with Holidays & Count Down - USA
- type: web_search_result
- url: https://www.wincalendar.com/Calendar/Date/August-14-2025
- - encrypted_content: EvUFCioIBhgCIiQ0NGFlNjc2Yy05NThmLTRkNjgtOTEwOC1lYWU5ZGU3YjM2NmISDGi5R5LAyeIUIVSHRxoM43cSby7rkVpZZASWIjDoq+CJ+BP63k3QlL8e2PAk0upRfRq2WxcGyDZCJdxxy7JjRrg6EcK80ZxLzrAIaqEq+AT5oTxSfcGz+3Aq/LFZYAweyUgQ76LGgKNoDE7OeIUdx7YAhZvmhf8/MrmLsFu9oDUpxrPzi2ftA4BZzFNjCtLDr7+xaKuqHfmOodHh0cJoCNsW9XbwOCOBrFIqkrkLgYw3Jju9dZOvkpYE0DJ1rckO14NVOaUGxO9vaJWKlByAhVVhGJ1v041AFdrpLFVJhwpsP5cQFeKIbN8YrPyoUcU5gLc2cU6v4JwqdRwPhlSzdTv9Y5PxvfQvwtIBXtYm6LYeJKgmyx0DA8BaHdLBOdKj6Awp2tKf1qMYfMezrQOhU7j+cW5BvlWbhkbPc3I7MtPGKBKYehbOgrZry51m71eLzvsLUfUAOymZi4kV1Dkp20p0RwKVIO5oVdDUp5ADMjOy6fGt7qTD1dfTLfNq/fVIyaRtzoKjTB+kQGsIUHz426pF6xzirZ6ThUTeYiDzpMjCXywt79VBT6R+XypHCSJTGXwfVwxwLOcXnXkufXoo2POdDAMw8YHS9765Oxcqnr77cY9JvIznCtgcTsOAkMU4Ro3PlWu/RODsd1f52n8v94HWVdnUSzwo05po2TM8Vd6Gbi+ofeFKtuUuWXeDHm5aktAzcCjBB+5Xqs4YEcVA8G+HsN7tu2FSvypl5O9i61UJ/kUSEEyn6Ff40YLEkATomwVPxNhbpIsDxohHbybeGc70nSHWc6psWs5mnfjuCmR7jSdNjSZtwuzAW0zBSv+OmYTF+fdyGPH3PqlF+FBapYRRnPaW+MLOhTMaLfUbmLGZDceUkQqW2wCRLgMtsGClDQHbSuI8UOvK5kXckqBAIPVJ5HzELYfhn/gPI2jjVJhwGJDfK85mTBgD
- page_age: null
- title: How long until August 14th 2025? | howlongagogo.com
- type: web_search_result
- url: https://howlongagogo.com/date/2025/august/14
- - encrypted_content: ErYGCioIBhgCIiQ0NGFlNjc2Yy05NThmLTRkNjgtOTEwOC1lYWU5ZGU3YjM2NmISDCz3OJhK3poJTguoMBoMo/tRhDq6TJ3e8JCAIjCC1UNWvlJHpBtB1lqoglwG1zThVF5VosJLm7Q9zJXY/YgaO3w0DG/s8PR11Bvy0kQquQWs1VysoI2K2kT2Fjsp49Jna4SXreEmt8TUDsntSvyoqssdieve5/GkOnvWLPjyBBYRtic2nyFdo0PYzgpx/QzcoVH+jsn+TSOKS9G3j7YE0fcP86Bq+AiMO8JQs8caeJkKFxtxISwcPPJ8xJTFFm1WXbYTqaRgONxwpad9TPxg5w30CtzHXamKx1McrhXEONeh6OAdtid88IAHKe4gL3uiuiFX6B/nVyo0bSmMsOj3kxQELvYwcyPm28+YJsupChI2DAq2qCtLBWj4qy2UnHqgENTDwe6oUh1PktUnGCqzpROgIl8leICvXx2Hl5qhaFOUn+wY619FbjUN1ENuyqgN9m358dV5TIYLIf4L9AZ7AtgNpOAxGiE95zFjyJL2DBumbCZ5Ik7t+bwjEdMlifzknNJqmAiRR2jAuKkxs5noSeu/N7PHhPMRy7ZxrBOdV5N1CY0of8b2lVMXfNdUE9iWAF1Fm8zBFPlcxcQyJEDGQkyIxeALs+53ylqYhiRsZXd8yoDPO1IGtJ7z5Brk4DmNrz3vcYQKe5wUOnqY5dGAcUBr3uo0Ll7Y1PXZlCsdpSK/sWYQyZOvBysm49w7zr2N1OPV9kLoq2mBnltbTLo5XUZ4Ukmu3nb+owbmfe2YtorsFOyfwRI4Zt46+chfFr5EE/3mfJ1F4sdJFXYi97cGGkDGChBw1ere2ptcwJybhDmHEqFbUD9T1bhX48Yzx3ATxGRRuz1bqKI/eBokSjxExp1H6PQwTtvguzq6e4Qv4wt4zKD+PhLqPOtw4ysxLgQNu2cCMwtjV9BVad/RWVabIQETqDd6x54yWp4m4RrHTrMCyw8nLkP8hPG6pckLZq+KTVPZmito6XID1mlbeOqFRA1EDVK+xfyPVBVKDMzcu72jMrl/2t/e99//XLk5rzqlICNAziBt0iBwGAM=
- page_age: null
- title: August 2025 Calendar
- type: web_search_result
- url: https://www.calendar-365.com/calendar/2025/August.html
- - encrypted_content: ErgFCioIBhgCIiQ0NGFlNjc2Yy05NThmLTRkNjgtOTEwOC1lYWU5ZGU3YjM2NmISDIkW36VDtOPzSfgmpBoMSZ+nlyuqy0yuIq1lIjCkRZIUxV91UR8uHnvXVth7d9DS0R5ZgSNBFQXl3SSd+26dSXOzCvzFq0Jq8/Xor+8quwTkX2ad5IdojEljGThehcAJDrESWUkjqJewbzAcpyZKJ8JF4L1h6vw/spTVBwwL/ffoLBXqT2yzPrjnGdimaA4J0FEYzLknWithI4G1vrB4Y7f/YsSLtd88adx4HWo60sWRjIoizm6sz2odQhUf6yatZfycqh2NLpQ1MOcEmlLZDFNkD3enbIYmBS01/eltjcQmH1CEp/aqE8gN9IkY7Rr2BRViYH11rXkWjP6MQYzhFhPWPmChEw3PouVv8W4kSsbrO4SbbWjnp72Q53fyeJjqda9TltlanR6c/iv/UWFcPXdmsCZhGUiXapfWGWRXABwixL0TsFCmC6VxhgTg4lj18EBk1wsRj5wqmECfzgjzupisXMBgMymwK6RlYGtQI+6BML2zfXNGaZkl7GU6meuJdCQ6Kx4heBkge9RtyAdeXZyPBicCTUGEAp4Fkk12wmS0qC0wu1NPtd+kl6NF2vBihNP6WX/nJQWmKG9JtC0UYnH9RNhd9H4N8Q9k9AY5++Fhen4b/uP1zPplPqrqMsrCHkeK9rPw9y/DlMn5wNTRQ931pLMQKuet14KGWz2buvgQXICCUZlQmOqMjrwdun8ehgWadLuKflHmJME/fM987OiDckCPcp//aRihDo8T4DL6wWzTPU4CQlNUU1butTXcdX415pZ1gMDZi8w9U2y2qigo982U9sMNARif4jMkQca2gyKECUlfnH/GBg/LYkzhwGWqeibrqTG5klvHg9SD+u8MYTxTmbXnTZXoGAM=
- page_age: null
- title: How many days until 14th August 2025?
- type: web_search_result
- url: https://days.to/14-august/2025
- - encrypted_content: ErwICioIBhgCIiQ0NGFlNjc2Yy05NThmLTRkNjgtOTEwOC1lYWU5ZGU3YjM2NmISDFalY5JZZv1KSYCilRoMHngZbao7hqbNY064IjCepPltLOs0hrwCFQ5TjRZCXu7uL+DLRfbXe3ts18qgGtJdPEA5L3uZWFusGnaZlQMqvweh6sxsxBYBjAzh9qMFDuhEyN5h9jDBCgu9zh7qlRy+Wm78asqsgTdoJTCkOu42zWl3Wm5YxDFD7SMFziV8Zr87nBhX9Qtc8U944HhBNzpgUlAHR3E0+TCrMTS3h1mepR2OiTWbisyktC3nuA1WN7KJf9hp0H+2WmugWJ3ktB8osZ4/Dri6X1uXxK38JYJ/3xinTjTHxJnh8/eieVv6Dz2AC2zhVux+dXUmNdmLpop7mOsSHCmY0M6loacof79BrGJcEt7Aeb56jIOM5nHqCtoXVwgu0H+dQZKE8jFR91Wz4a6GCJbDJ6EhC5h9wx0gS+YwXYjUFoB6NoQ9F1AlUPpO4oF4KzwdW8JzInkqa3GVr+C8Tab2IQyK9LOtQZv3kfbhPbPCY37IuPmc0xkDuBDXqRNRHU1f7tjLLXUSeYy0V+DSlpMFTouDjL0kWseU/68/dPg7cV2fcTWBdP+e9yVUohMCh9D8GnXPMgKlN4VlqX9MWHTtG06p2r8BIcqHAl3sBRrEG6rZyop6D7oAtzEQYKu9IMBY+rQumrCHQpSS2pQfNPO72ILZMePeogAOwiAzf5T1v5RptcKJVwCzlZGFkHK33tHZRRa7McPahnQes83fO+cUpcjsYgoYhUSArIAoxUP9G2hvImrwHvui4aoeH/VhW1cZUZYk2pFccKLcXxEzZoYnhreIRBRFAGRp4dLLwNVCLE8IR4ISeVEptG5ZSF9pG6jw0cbM+j8xDIllk88pPJp9MVJVZpFJu5Tm8rZnHKtMcrAK7OOtorKUPjpdshbZ0Cmdj27rzLc8MJ40sTZs+y/UqJmyddad4L7fWu/ekO5KgkSk2IXuE9P7YtkSJc1HYt4DG3cQfxCk0/8/r4eSFSMrTZHGpMyu+jp0UP68Arr9MR1qF+X3xRG3dEhf+HlN4lwHfqtsLMF2XpLwaFqPQ+f5fbotZjvtklMppjt5qsbARTJX3zWty1x5UlAEQhbp4+pCyncDzu2tR/6V4R9xpc3Nif6WCKbWcm8YnC0QCwhnyzz3DwQU7FWIh0qAmK98M80Hm9hz96YAZcdZo/c7/Rtm3pXmTto9YQAk596UrvcHpDM3KkUXOX45bZT8+oC8CQ5r7Op8Pj+jEswUM2fp8x3a7E1GpSxcccy7pV2HkidJPfgJ0mK9LsDeglcLlOlhEVClPKGndVH9t4aJQQJ2ExTLGqyWpznrvJ6AJHIf/I3HSaPMb9Lmo6LGLSQ/JitIeaNVLIlhV/wZMcc14jzenEj4Aod5E0JtUA5V0xgD
- page_age: null
- title: What Day of the Week Is August 14, 2025?
- type: web_search_result
- url: https://www.dayoftheweek.org/?m=August&d=14&y=2025&go=Go
- - encrypted_content: Eq4TCioIBhgCIiQ0NGFlNjc2Yy05NThmLTRkNjgtOTEwOC1lYWU5ZGU3YjM2NmISDHocqBAfnylKaNAXtBoMsESQYL+vau9u+ByuIjBKyKio6Tfor0QRIXLH2Ky7i1j8OOjHLYMwz5FAi3UkOKXSAiMjR3x1eHp4lWQAiDkqsRJSl+bqlbOLsKoDN7B7sLYFOAbFJQhVOTSD25C8kpImLOIfGXvQXvvn583TpG8Xsc1/QpX+j3G4rrduP9UZXpshf5lI3Hx2SBfNDs7XF/qaZbJPyRvBAFE0M3jEqAHNSikqulVGuAbKHXYDNLf7JPXl2OdA5JHzIRxsn3tttcT1JhoE2jB0MVrPmd9S/hQCaw29ROZ/TTSz2GCwYs3WYskLjTlA6QAqSS7qm6O7OqrQBqQRPewXYZX7r598H/mA4R04CMxxphCwOnhgdAdwqOjc79TmMdFdYes9Dw4SkdAZkHT+WrRpGLNbFe189MO46G9mNIk7hucz0nSPU6f+bF2uMVvaRxYifkxGstgQehiJU4hfylvCloKkV7RyHlFtnOLleXPT6LVKWZPJTUU6xUtzWsyaV0bjw6tQuzmR7lIhsgk7N8uM1+bYdGm2mgDOIvUcgAjzjFV59m9kSzyZRqy6C1/FzjwkqT7TEMK9UsVjWcnaHPLonkoaZrIPoK9aEKjzRwaJ4HfJSLSwarD8wUWONurommHojL0HeBtF0zOFMgNDE/bt3Vk6ohxoxP2O6l3k7RHEvmgmIv449iLIJ/9AazWqF6aXon5pGc69mPnKGikBfv7Ry6hW+7qiEwlVi2lR7e4i8Na30NeUcbLo6gwyLi9dlgIY3Fvz3HA8OzMpGiEdxmYVg2HRstGz85JntArcy7z7ChwOexCrQzS0paKnpyMqDCUgWNjpqEE3Ty+C0Q5y+8w60az/BWpd7k3pYOyROWuDzBShPm1OM/3BYoFNeadVQRW6WZOyRdJw3o8WuVl35t1DPAQSaCUHEnmKbPXNOkg+HJo4Xqrvzx227Jz+qS2Uutb0PeCq6pOwIPzG7A8hCuB+okd9VfkaAWlX7pxa8b7bfwdYiIfDljaDvXWbNdJOZrRjcnby4sll8zxQ/IW9ATdDB6hOHaqeKiPOS9qMOJVu9JwYF6Zv6OL6tLMgJIcULhcRsLNPckvAdJQ2VKoT4Y5omf8nLyIltMexhf4giomYghTdoou+u6Tr+nhyxTVeIFK708ILuQwbWX4poP5nMdeFM0AJPAave09+pvc4VCmjoroN3hamOUV/tovDTHon3msC3fmKJ7j7ndVFshKbcnKhii+vrCNHQOc29iKBOQj7AtQOSvhgYSbuNyxmt2YRFgHLC3KFRmlaZXdxArdsaRwHicEgSygHN14LTg/D8UfiayT4b5eGI1AZoY8NKMMBlGrpHUaPiuWynMWxTUBe9de3XfIYZsruw38PynR+OOS6Mt02Lr89p4PEmBZz7tQXa1gIkbEQmpFK2XVaW7vz6J5aOA/X5ID87EM38A39nfT6Q2peqfMgshfoEk8GKSIqbG+lfvj76w/w8nKSbppgm3SQJDwkWblu6nSRlhqSjaOGLWsp9EawsA8IIl2+f95UpXHRzTSntrmQMbPLFqbJQlEqqVqgGK/V8q1Mf4IoU0NOCt7lvWpqClfEqAQNfxQIiLAQbkIgfAgc6Ki5nVJ5hgrUTXPpIwSp854ZrohAntW/5394acYg6GtV/izqDVFKevHk0D902qQQ2Aj6u3YjNj92OXpM3kf2mfV8ZosJ6edFdvm2FIXIE5geghkd/M1w+aNHgWwi6INEZQVKn7RtTItWIlBovyXr2PJuSLAesH5h8gQ86MWvNZMpVn9zOF2UjW58kIAJHv1v3vaQ8euGVHfbxx33IqBkwcYkZ0i+WcNx1Q2HBkvsadt+wCurLJygjmstMQpOOusIWSHbNVzi6SEtOc9Wd5P0t/cprW8wiIObqqnsHkmAx6atlAI4pf02XnNZGreU3fT4NVQBiofLmOXxXWi6EzsbjRPDPgnn29RV9FeKymoLsc0xZ8MFQ2ZB2321yW+VKdaVIG/sCMGfZguXttEReR5g5ReLz3HsikuM+ss9wqfDG6NaXW/Zd8/v1Y8L9R23akyy7m1F5JzkL6Webhd0fWAOk8tt0iKYo40uvNPG+63m1iErodxheF7y3DSfmJU5XXF+gyGTlOgQtG3jbn9nv52B9f5FOvPny+R5CXVYh3Tngf2S/doOGriX1tdVY8sgogRL6yZjbgOTNUvyhIUM7Au8TmNf+A/3pZCtZxGkHqOCROKhm0zOgvxixAGHXlmYKZSJjPB1fnc/v4SwdajYep0KQE9h/4QQmnRuZ1mNIJhPexLfY3zErEvTjNDeVQ7GqVdyVktEE/mJZgLTVjx+Vnh1Nhn86fGYZXkY4PlmrbljeCRVxox0hM3vh1BcIO/2MWtJIjDcMt80MTlr9WByFOpi5Nm1Um5y8PyZOY3SElgHuijHFPPIWdsW86Gk6c05uGdS2YpYfDGo7bbZJH5B/0MhCFeuLenzlivz98mhn3zha0qRgcanRhy1iLqjev8bLXDB1NFZJN0TKaAdpucqASllICgTjjfAotOg/1Y3Bt6s+MpWPeA0eATUTkIoikmDjWRuUucjQ0rjzpA/g03gvG9lc0fOGojL9sSe+0bfhV53bd2nV9akEFuG6g74S/QoYUSwkiscUegnHdhIG1gIxxR1ZEWfK3+8gXFoM32H/nlrIIRYBd/iPJNK0OZ4YAuQHLvY1zH3Ly22ErFwiC0uyRpJpiQjc+4X+GOu4FUBc1DQP2Lla4b3DFjWkAhGWwbIjoHgwJrQ3svb3eUXs1PIe9vvDTXLXuCOGnqYpTUtp4ZuyYDM8TY5+yLJD+9IC2FLbnbGZpyEPLazG0aR6lLHQS1MZX4G2MCU8rhwnKQ2jSj9SukBHELD9+VnyCmWyaqG59N1iC8oU0GgaR3lm1yraiWuhST03EFESZgMGv8CdaVnuyqwkHwtfZbRoPH5gxPj4Dur976DcJIl37STTntWBWIKKyIMdSZc7/WTM+tz3Wy9EFuioJUalY0EypZiO6zQSSf04D4TO8CZlXXXy/SqQUywxWNty+f3yrLydnD9VvJgqzWGmma9XVeHL4NHzOJCvLi0aV9fcs/albFh+1CYyiOXrzV8v11t2a3mgHkRHI+LJHLmFk6qIOolaY4SeVoXgi24hb14W8bHWwilAwzEZHVgqf78EPCx0Pq1pWb2Y/c/joKhwIIB3SYcdu75muJ4VHD2qQvhW0s1AL9T42eaRsulDLyIc2kJGAM=
- page_age: null
- title: Holidays for August 14th, 2025 | Checkiday.com
- type: web_search_result
- url: https://www.checkiday.com/8/14/2025
- - encrypted_content: Eo0GCioIBhgCIiQ0NGFlNjc2Yy05NThmLTRkNjgtOTEwOC1lYWU5ZGU3YjM2NmISDCHvW5bllVlNwq3NXRoMJse9HFPZ6cP3pQAsIjCK+l/qeMt8XJUBOBZeTHaJ4MgkotNocV2WzjoNhcNA50iD5VLiigO2KBgXTOJ0qvMqkAXlsKgwnaHnJy8DZ6IA0vPv5QPTP2WB61EXJQMtbNQysOdA2+Z/1qqhQaedV1nOX8MOQDnGGF1PZ6/W174iBkhmkBJXem+e0JRIvTDT0CimRx9Dg0W0Km5q4sCV3cUfSDK8etpv5fWYes/txuoKC1ImPN3eb/tlEYuG4EkDgkK2/QdF2RPaANwpyOSaGz4XwuCWyeb6Rh1sG2RCAMPaGGnaxnaMy0j3u4FnHJtZ0CFmn/DkIVVXrZatiNdAvttuKgGB0ufqpQiBOki/+7DyJ1Q9LAgVFpEWnApFgZHClCd0DWTmxmAfMUMFYpx1x4otz/wSX0StbS+qIIFbBRn0brnEu0DBdIFh7fycyNZl0l3VPJ7j02NCcBcMW5PjE9KsFW+pCbGqcL1+tKTPCuvYRz7yiLvXBaXJJ+a505vHrShNNm9SGkMCl6cpCmzSbMGv06ss0PGdoUibjqb/Vy/Wh8cvLq6uZxEUzY4kMNJYCRoHfBGfqvKfwXG2GkWxoffqudHnW+e5bdR8Eswe/BSoBIrJE2f4AbxsTcMeX/VizoaCi/PWA2X+dpwmTU67yyGTO8bUxXuOw/Xj2jppC0xHh5FVcYHPvN7SAUshbkweiv4DvZAXRqL5XxUSz25lcK3v83SyrEM/KyqZQElVmTL9W99Ybu4UQzv/4FJUWceeJXPF3dN6qMVLjAeVnUZULMDNfSHfuTpGre1W88nwOAhqb05Yuq7pLHYvMp/qn9Ny2CTC9sC2WjqDqwTx0dmb5Hzy349Cs+GQBF4n+pae1j1ynM9ZSlRMigeXGJZuXdpk8ip6liWgiIaNIvoZD7eHMQb/SX8NSnxL5EEUPnKbnl9zJyiGMb1D4QZYjxgCASxrWraeJBgD
- page_age: null
- title: Countdown to Aug 14, 2025 in Washington DC, District of Columbia
- type: web_search_result
- url: https://www.timeanddate.com/countdown/to?msg=mrfoox's+birthday&year=2025&month=8&day=14&hour=0&min=0&sec=0&fromtheme=birthday
- - encrypted_content: EpMMCioIBhgCIiQ0NGFlNjc2Yy05NThmLTRkNjgtOTEwOC1lYWU5ZGU3YjM2NmISDJOiGPGzmV1rbllwMRoM5uNnQNW5DQx2feA2IjCD6PAOEByso/FonofJX/la6AfWKyhYIjo3LnGDq7w6GZQJzUPerILEvsK/IFp/NhYqlguNJOlaODO1wbTITUfiZ/swMzeQJ/DTBJgMMxny8ecFmaG69BbPJP25SmwpE1sVh6ALVuS30nUuyFPsjjs2NK/q5w0wQh0NK6Pdj4fCr/TZkqJMKxuPQPMv65W14fZez8H+1NNz0tUO4s2dVgKPuSYxztGH5t22/JKZFBHbl4gJ9eN6z0+Hg/M1fE+2Jw2KeAz5ej8JJR8wMbADt5AAJ7s7E0ossOhCmFwvY+lMo2sz6CSXonHuHfeJ3zvLVhVNKL09yqvSCKyjjXHmzLJf+J7zZAGOB4anCl3fvG9IY4JRxjs5cZtjnZgMUvic+fpcUccsxG7zEGvNkaAkDDBPS5OldHhMgiSpLLXkfPZHaGDrK/NkXOMDxFPo3yMO/en+nJh2SQhWYHBxcK2CCDYOjW2R8NKf1Q30FXEM8KEoevUxP+/+DHFIL0MzyjcjwcxXrsdIE4xpftAgs45YX4R6rZUek8EBaOp3SGZIMg0KJwP0elSobMZMzwTdz6wUxD/BuwKwxSrelxa7JNtnmXyxPgm65ucOFM1+cKNtCKvAaFrsci9SsXaZ1/QH7srV8t2a3BP1JrteQafqOi7f7qsth1f/gHWJLfDM3+BKinT6NiQFB6O1qHg1Gszn7iaqIhphDUaAchcMRpZpNBEutP9VfHMQr7Q/3R3L2Vr1p1+HoyT140ARNJ/9oSd9fgavOrFGdLALtAaCkqe3PwQn2VFDkuxJiWLUSg59jZ6+FA4TNhlCQs4m36YK/z8tFCbA4upz5jAs/qTCmH3X1Iv4UdjgAfOY2ahbaokm/FiGcDxZMhHeY7EAu8oAwFbGoV/+C1cyBl/p3ZxeJB4+dxf+xcxNPGxmsFa0U5EPZfPIV14f+WNACE6d/hFM+sgI+9OlZRsYL4jOSo8kKAIechdRt35izRJ02i8AYx7p7/8O3FXPoPaTW8kjfsqtQrA3xTtMn0HADABw+Mialxi2DRtbUlawgQqItyxkJr32Gww9I1/tzlW6/QnyQbR59w6dUifKDp01pLubhcVDNjDseCvF5JIxp1WxXmnb2SNTJna5/ZKcfEb6e1MEs558YR7LJrzj8/w/62dqB2e5PY8aKLYDr0p9+/LjTfu5hkLT6v0rO0z1jTKPFFnCMA4yWtg6t4PET/mZ/4nWlm9Brd05oZiFIQnogzxldsTmSdopKQH7tVG3UsHPoLIA2YT+v9SU8cHSFf6ggqOJ3inbK+JupabGt6nNM9vIN++QZ5YzgpGoPhVWYAk2TM/fW2SyHxm19I00kYAdMUzn63Hkq9zM+Tm8oJSZd3cIp6XZguIIS7LvQTDd8JGtCEZNteo9qm/Qht0V3r0oZfRrEyLETXcxtOsD1dUJXP/My/nrpS9wHMnQYjIgd0iAhVnAFGq82lpQ0n0GoWoxY4nboU7vzKYiwpc5f74VPfwwh+G+fu6SjYrN4lAZWB/B0Gw1HbNixtSXnHdENse/AGnadcZUd2ABrM/PopFsUCoVUd+LbZt4hsCZLxY1kgT8mwZdb24KAg/xdCZvHlQsId7NKQTgRpkYh7ntYS1siscUPC6XbNGJk5g5HqJLnChnGDtUoqQ2C2+QoFWUF3m6AV2SwOlbsz0JC9XwpgSO0ElikVcKq72kMfwCG/0HIAFOTQp3CYURzZgBJ1ffoTqD6f7/hQZq4gki6UcK9OUL2gQxwS0EJPFsW20zvmQjKlP3cM5pXPMdwMlXmHvS3lF/MrHfm3YuQgaw2IFFHSSFwgeWddAUqSAkOOsdSlK5vb3DKygG30zwPYs6wLZzit4QZlCkjLYq1b9Ka1Xc3KJTh1MLliV2LZtt0b74LpHS7xFfO4G21529dwtxaqNSrcKL5bTDVGEhaXr4RJHqDvYxu2omSa3JyJxKo9258MVovx2GAI29arDoeRgD
- page_age: null
- title: Daily Calendar for Thursday, August 14, 2025 | Almanac.com
- type: web_search_result
- url: https://www.almanac.com/calendar/date/2025-08-14
- - encrypted_content: EswGCioIBhgCIiQ0NGFlNjc2Yy05NThmLTRkNjgtOTEwOC1lYWU5ZGU3YjM2NmISDH7gIkgK2zLs5pzDRRoMuF01Vpl3lHsp3Y3xIjB+VBq1OC0DYkQmH/JhVhO3cY4zmrYxp1cHTuuOJ99dAgZSzWU6+gUoQumtTQlpg7YqzwWLxX4UFPE4Jlqm6QKvQWIaUxZ+g5LcUicBxA5jksMx9vQE3yvy//a6ETOMyLT/2xyGWHQAIhwi59bU7fOv7SidTefS2pWxEs18Gsw+bYimcG4uWx1h6T57ha1qOANtuHt16P+1Z9rM67yiYdj9X1AObPTz2njMbMb02RR72SHrqaC9GUkQq9fNTAOp+pSVA7CH4wo/GihdmIUcYNlNkQPl2IiwUgoMOffTsyV+Ory7TRcwMWPYQkJRczOLE1AleAmd/RnJa68dUmQ0drJazp/lqy5S0pI98BrrC7TzuOxObBRbFiaLo9yLMQAbZg+pxmMTD/VzMoWYViMk5uPT4p7h3qIJDvZA0/Qw8lOvr3VfPi6Jekj5h8qdGVR4AY56IVIGs3yBnxiG3/0AHq4+9nMUuZt1sNjagljGXP1A/JJIjketxefbLQ+z/t2ndvIbMrWTjDQMt0t+Lur9FaSW1WHW9+eUrxoXhS2bj2EUorTD6Xrg0W+Ugzvv6ius6DIuWr/DITDO7j/hWZtOT+4MycYv15x05CFfytv5l9YyKTxmBm7DXxyOwz4tfE+QTehb32D9yPyh2JRUx5AYHla/qT4AZxqPE20aUA3NFMztY433ljxGK9KMA0okSEgIy0gEuznq1soKACa5BhmIP9RiqekHCu/uW1/jVyWxWLgYdaZRyEp24ricn8yOTzsTO+ygP/OeYGMUFZmRG+mBwwOXcv4xYyHecU2qEtX+dmaVqxQX8u4OcBgksDbVI95OAUBL9Te7KTcm1A1Vg2sekUdiKZyHEzHLXjl8O0PJ5vzs4ziYcXXbmsykdisYoMcUoMIpcsmKi2fVaF1zu+VY9scRjTLPLHGx1LRYD/iaPhFbfgvFP2LDuflpcBKgTEZfdWmx9pci2IJksrUh5tvVTV+xdSWAhoMNfyzFCpqtZOUEh9IXs/Kn8yyiJ2kZCFejJ/0EhxgD
- page_age: January 1, 2019
- title: Thursday 14th August 2025 | There is a Day for that!
- type: web_search_result
- url: https://www.thereisadayforthat.com/calendars/2025/8/14
- - encrypted_content: EpYfCioIBhgCIiQ0NGFlNjc2Yy05NThmLTRkNjgtOTEwOC1lYWU5ZGU3YjM2NmISDBMSx1asuxE9fe5KohoM0CCM3Qsh89JlBE8cIjAJ7JRxXvIU7qv5vXtn1uCHhn43kYTpeBkTUYOV3I5LtmP/jI4UB2DgvBLScBNY1qAqmR6n1XNwn+v/rLSctsybpe12L1ep2shiIbZS7jV73crMTXe0cwNF3k4aAZ/DHLtdhtGunOzU+dAQOFdlVvf/SA9r3fRE2EWsLIbWYvf9F1j+NAktYfTRAjgwfYLrSITtiNoPMBwI/jrCBVRUl/4Ap/Z3E8TgeJrtOAROFqL5ernODYUHFahTz2mUkw3yMAF5yPGino1BrfZ8aSrwaAWctoFMFnW9JTB3rAKJC3k/id6U03/sZA88BqzbGCSXs7W9pINAMDdQOhspxKwdHVqykuIgvTM0ZKnfl2hq0eUmjdyWgWHRN6aLGTEwbxRoD9JyNvvLxAhQv5iPMFAKkZ7E5HF7LxXO0SzeMT1dYsxIQLtkj0TI/2cx7LsEazysIyr5eLMMYksJhRPpLh43TkH81zdFmHvs58G3vOHFtclcL4kqfEP9nXqramcTAddjMS9P6LC1zxwIWqyK0K5Stnya6qX33k5qv9QMVtO32KkvrYWYRhre0c3FX9jfGQF2ZzPiNiDIrAH+lzsJNEmtn7ar722cFWQBAO10UFFgeKDMzbkr6FFKEJOYnJ2cmbewx4WDgMVW/PlnEk9sv5bSRliLEJRg6Eq51bYHrU+7RoZgvkda8Tx5RHwBRzb8/okn0cCOlfPoNTaJvRdOxNbNdezyKP1ua1daRTvA9zBwS9A2PPrhpZ2v5Wq3BFHpv73V+HiAfdcNItf7/VzlNOns9S/4py+fEf18wPlBEqbkGAo4eUGetXqJeYzKJYk6/sD3jCQbxDKrmP4FwaO7rLEYFSwx+iIasUuM2v3FnBf2+Gun8W+eU4t48pCHhdT0FtHAcIcYc3kxyjwSJ3RI5v9PZykC7kI/Q2Uwz6sDLQlAS3KXrMCS5yvwGvrWaoZ4npoCPGSCD56DTFrYPexDM5n3lPCiLwW9YnS9H2xS4KQn1htZBPNa+Yq/g/EOwbwE2Pd/ZPwTXKF4s1eDWxYJI8HIZP/BftZlIJqrd7ykI24Z1VFRjOM8db8dSapHRANGykoXJ3hu60TcKehsYzeId4nhsSOjtuPcj6JDi89nxKMzQAEzARqW0AQRMyDMkL5S7v2zA5pyuQdiUuyoKDkqEWcbbCrCQCaooY3l3Iw8nrxF+cxO5hplAIGswkzSvgdPlUUKRvua4sZvo7+oVu4MKJnFtUXr/7NS4UlHrpv577JVQvJDyKU0zd8GV3BXwryUckluE8rhvG5wzgyrvv6ExCATMgpNEF1O3rDT8NseJa/PNX30XwoBB4b55TlbvnoixXnPmFhCzsvL7YN461eta/MzQq16y/pOw7dZXVuPypEVqfCnXQaSpm9ERfBQRs/wJvjDw6qb3auTmxT6uJyCuAlZuF8v10pp1NFg2HKZUA7Sa8IsIWEeAf65QT1gZRe3RbdDPcdQvFPRbyxKaNXn8MnsbPRV9K4c9+aR1MGPE6Lj7wiP7ZrrIKeRFeCucHZhBf61GsLzVRRShtrmaB7mPBkVXDqyOHmPHiQwbQoL/vrZLchyJ8zr5C1wpLg6DNtgv4Ap0FHg4Zx8G2NneGPh2d3a1Qy0cojz90Eg8e6aUftxdqwD5c0phD0R6Q4ob29+Oa8T9j0ha/HCr61DT2m6IV93J8BhxiQh/Q+kXiVs/0Yqe5tZ+bIbea5nE8OyfRvhcF+iMaE8xgUQffiI4MEdSDzSvh72keQuU3JEwTjZCN7FGgFlUAXN/jYo3Y80XvfqyjE5Lse+8qX5EHhWScLXArgBLj+h5UJxiV09yJpTht2b+ZLfBjEd1kYoU1g1OxpSHQgPYrgNmds0jEz9U1cJP0wpWVJ8e6ID2+4RHEUwmYO4s/+fmOd0Ohv3p+GbHH3pMf8fpf3FVfDzQ2X0ZobpRVYwUX1ZzerT2OK3BsSXxqXQ9XR5RGshOsbBEkzYCjylVA/TRJA9HIlSNTs8bRhjS5dzySQTeJC66K3hGe5Dl3COgX4gsvyL1l6ne9XtmRgOUhUf5VB3N+7xsk23bcTB35dOQkVPOJYt5a3KC2SAjkw2TeysWoVqUvJn3JxOttRLr3eHt+Pdc51h/SGy+Z1qiYCKiPYGeeooF8idmVIbuAbvopuGNHasdM6g7lXMiUO3cRBMM16o/hDaVyb6IuCL3RItYWKFtmXXHNzLckekcPGIWLU/V7fjgwcOtMPbTSDljjsuk7/S4iq0NYdyorW/LlVaN8J4XgUhOMcvmfxIxhv2qbBPFbW44+HVk8Ku6OYWbmj6WyWoJgB2/HIG7HGPHI4q/sBZAQC28xE3EIXBagMtOWBsW+t1TxpEC1wqpdKQw65di7fZKAPHo/+dgK6p0Uiid7HJ9SY97SPiOnK5eXAdD74svWvtaAy0vIUtjh0mbmkE0qATL1xjm71UBaSEvEGlpTpukf4iXroCJ+daYlJGoz5JTRA1Uao26Bt04m3JlMNgUUeRBaagtVzBNeIsDvDRNbWZRMg+G43BuGC96icEkFKNWRMAGC4VhsTst0/TtJpgCG3R2OmrzwGsHwAKP40eU8kWUM8ePuLqdTp5UmbFXbADG4xtR3zjFpJSmuSUe0by+kd2gzkJ8dHNsTORRonWr/eksfjtRxdqz0Bw29vxwT/SO6+6+lfUX2p2RH/z3RW03iA8LPCahTmWa1G80qnqvSuj+0uEx/1eBnB+Iy6gBRQbv8Z7/kW97RNODWh6wSRzCWlwMv0O4EdCmokqKlNdcgg77KEMUBft39hAYRE2QPQrSfpE/js3DJU7fnrhLCx/yajpy3+BUsDZLHal5rP14DwEGRNPBi9NCF50e8rGSL/06YA6kYhJRWXWQOUT6qo2VxJevv0tgVjrGvuf1KGS/R8LHcERwevjXxocHjQ/Np4cmH7jCsOJ0I3trXQuTv7GBaU98aYmoOB1MI+XdztR2ZxmbiwlI9nnfpaQeMB62odJ0/dcN673Xx70kXJIqTCvTj6+F1aBhqIbsCkntYE89Q3BtrlaGB/FN6Ik95JStVfxW9h/9IpxUAspFSH9/wcr83nAjH7aMJWCuuYjegDmH1BeHYWAwAYNUQM9UxW0jyCZuYrMNrCK5muXCYxlt0xNT7J3TCOP9OK/OROPYKl8ww61suXdCNLN7n98YzaGlKiHK6QLBqvreTUHgrtgBHKpVSeaGDVhx1kOYqqOfLr7IOUFsGFqAdvRIUJGXUxlhTcnQMOMnUmOaTvaeRotaocWRG0KoFgrKkwRuh4h57FyXsIXGqV5YG4NahNRV7DYpjGdXEM3GgZzOCxvTRnZ+wVH/TSmr7TNyrrm1d9n+kB2HQbznhBReb+V6xUACQsm/o3T+WtlbFrY006MlYG+kxwUAhdXvUhNjWnIOC4QZHtXCnxXxXLEayF1iWlrvEhG
d9yhH1sxCPvWm/vOBMGI/7B/xdoAG8yZLQl73clZ9k2o4ZIR/5frVecvDDvXwaIqpbRXMHbCaGJD1WRNq/2/ry7hRHImgZMJcvaMjo2byb79AHeBkuVxY7qAqgMk2nOBjEmdz/LIjdgnRun8Nz4EjcR/ri4xA+oc3g9tRrqLG/O7pWnNQpf10doOJEj/8JznrNhWHPWps5CrBM7bGLOJ8EBnDiHBXmOpHKZ1Hh+Q+bXS87IljkmfZY4pXaERQqKWab2+AeTY8WLFfKZFMAe+2xy48+7JvidnAVYNVcij965Vy1AXTLn5B9Sz38JxwpXlUygdKNcLN7Yc/c2IgeCgq7/dScmNuDQIzYEyM63CvQ4IXo/GugqEA6ExHfNgJ44iuEZHb74oGmyHbw2sYPbnRjzGk116yXsdApcnLWUovHqk2P15h5ZJUPXMaiZeqD0EnNa+9fh8lD6zm2zwXmim7imwm7nnCeRZyLCqPTWam0Lu/eKwWkKtvC/5NnlcGUV7zv8YIgELX9JOqqRz97Jqu5S28djOvCNTe06dDZuZhg89pjaHPuFX4V2Oy3xn4X2XfMkHhpL5AtgYiMvW3lSrbs6yRzMubkQUdDKIA+ThBMDjxFAdFXGqewjYGxOuWJOPJ2sNYCqUGXQdu+C8d4khRdNVdoUkm2UUObQauTKHA5oj4FAigOQ4ugYgviT3XGwFSrh/GZFQ6+9hc0/AD/YjJNEU9j4jqdSbsgC9hwpWePIlGmjbxpb90ZrpxHSk+PeCCWsDDt3nEs+0CXtJUZFsIIscDrFV8yPYdljfVlc5uDE4fiibSAlAHlr1007ae7RAWvRTVYksBXX1zT6SkjNxeliCN5TypYn0c9e1sJEadbYD5h05QoJtwYfo2H4JgXNNUNn8QxfeC0iCwErUnKSp/+HcEzC4n2BR0Uq4MkPYsZPg16pLZ4hn2O9liaKHxjT+5OESFxdQYiSfFnz1j4QM6hYise+uNRT29z6NPtliS4eSizr0fxVHl+s6P9TZBZr8Zm4S6lzMmnWIK99EIYY07TsWRGtkQL6iojdfGuiYgLlTjfcgT0lYerlrRVjRwC0RBm3lwAa85FzvUMSdo0lZgVl/V4CZcG9Vru47+59D4wjc0OowBf8a8D0adT+gpn3Q8pKzSQ37T0dgszJM5oz6dcvjx0BthlsGXz1cSvGB9Mp/9hRnjW5KpXDQ791UMDq5TTRFHVnPYRJ/6VTObiVvUCsH03/eHS2xX4ZfY0rWKPRQtUXIrtugsuNioJ9fQ7vAHAl6xT5WY634yyVLWOwCmaO+4/EKzNldo8f+BPGJXSo+h/1/c5kv/kjsnklKLPFwo02gEWE7a+TNYpwRg2kqX9RT7lgl7X04S6o9l7JF8sFFMbQOgFvtG7Y65n5Gk5IRY6VpqIoCRjYFSMsCsh407F7YVd3UAFzxd0xtIXygLe90U5TAqrDKtC0Xe5qkL4OYOdPH8ZAwMi4lJKy5+OHdsG64R1JjbaGoFXTSnGCOzCXJLrGoWSoWQoRjaZy8LYH+Ssv74WpyIABEdI1YjgOxqI+cvLwWdM3C6IPFfSk3o71w/HNAstcmFuDqqPi/Roh4jfVYJnpEzj6yYLiOSEJQ/KoxMAvVBG0qRIQfcJYfcDE/gq7qsRFjkrr5CO15JFope3wJy/awkNzNqASXJt/41lmssaSyLhdz44h6yprE5FZePm6CLteReUwwFP5C9kP6WuMKGjX4W0QX9kd98YToRyx8gLEt/hegI9hWdUVW/SBOGqBiaPdO3UY1ATJyIH6eGAM=
- page_age: null
- title: National Holidays on August 14th, 2025 | Days Of The Year
- type: web_search_result
- url: https://www.daysoftheyear.com/days/aug/14/
- tool_use_id: srvtoolu_01XS7jhb5cuvySCwMkqzaCZs
- type: web_search_tool_result
- - text: |+
- Based on the search results, today is Thursday, August 14, 2025. Here are some additional details about the date:
-
- type: text
- - citations:
- - cited_text: 'August 14, 2025 is the 226th day of the year 2025 in the Gregorian calendar. There are 139 days remaining
- until the end of the year. '
- encrypted_index: EpEBCioIBhgCIiQ0NGFlNjc2Yy05NThmLTRkNjgtOTEwOC1lYWU5ZGU3YjM2NmISDA6TeKmtBQt51f1xAxoMaWdsRvtoCUf61xo2IjBD2HcmHKG10DqXjPSp3iOapscYmcwPOgouSLR5uZxu7eD1v8NIZaaIsMt4/Nu+VOUqFSmD/TNQJvjtMBgPNaqVgo1Z5ZBRpBgE
- title: What Day of the Week Is August 14, 2025?
- type: web_search_result_location
- url: https://www.dayoftheweek.org/?m=August&d=14&y=2025&go=Go
- text: It is the 226th day of the year 2025 in the Gregorian calendar, with 139 days remaining until the end of the
- year
- type: text
- - text: |
- .
-
- Some interesting observances for today include:
- type: text
- - citations:
- - cited_text: August 14, 2025 - Today's holidays are Color Book Day, National Creamsicle Day, National Financial
- Awareness Day, National Navajo Code Talkers Da...
- encrypted_index: Eo8BCioIBhgCIiQ0NGFlNjc2Yy05NThmLTRkNjgtOTEwOC1lYWU5ZGU3YjM2NmISDNdN0S9eDsUEbs+abxoMEGR4NTi3doqud4VgIjCoPglUsXpHf8Rg8Sz2u2+gz0O00oDnOVM4QoTITPteuIbRLCdkSCl2kToynWQ88jIqE3u6Byx4FCxWXa+h108og1Dfkk0YBA==
- title: Holidays for August 14th, 2025 | Checkiday.com
- type: web_search_result_location
- url: https://www.checkiday.com/8/14/2025
- text: |-
- It's being celebrated as:
- - Color Book Day
- - National Creamsicle Day
- - National Financial Awareness Day
- - National Navajo Code Talkers Day
- - National Tattoo Removal Day
- - National Wiffle Ball Day
- - Social Security Day
- type: text
- id: msg_015yJEtDXJUq55EfSpwxam1f
+ id: msg_01SXTNg1wjy1KU6g4DM3b5h3
model: claude-sonnet-4-5-20250929
role: assistant
stop_reason: end_turn
@@ -154,10 +63,8 @@ interactions:
ephemeral_5m_input_tokens: 0
cache_creation_input_tokens: 0
cache_read_input_tokens: 0
- input_tokens: 11432
- output_tokens: 244
- server_tool_use:
- web_search_requests: 1
+ input_tokens: 2218
+ output_tokens: 13
service_tier: standard
status:
code: 200
@@ -171,7 +78,7 @@ interactions:
connection:
- keep-alive
content-length:
- - '946'
+ - '286'
content-type:
- application/json
host:
@@ -181,29 +88,7 @@ interactions:
input:
- content: What day is today?
role: user
- - content: Let me search to find today's date.
- role: assistant
- - content: |+
- Based on the search results, today is Thursday, August 14, 2025. Here are some additional details about the date:
-
- role: assistant
- - content: It is the 226th day of the year 2025 in the Gregorian calendar, with 139 days remaining until the end of
- the year
- role: assistant
- - content: |
- .
-
- Some interesting observances for today include:
- role: assistant
- - content: |-
- It's being celebrated as:
- - Color Book Day
- - National Creamsicle Day
- - National Financial Awareness Day
- - National Navajo Code Talkers Day
- - National Tattoo Removal Day
- - National Wiffle Ball Day
- - Social Security Day
+ - content: Today is November 19, 2025.
role: assistant
- content: What day is tomorrow?
role: user
@@ -212,7 +97,7 @@ interactions:
tool_choice: auto
tools:
- search_context_size: medium
- type: web_search_preview
+ type: web_search
uri: https://api.openai.com/v1/responses
response:
headers:
@@ -221,15 +106,15 @@ interactions:
connection:
- keep-alive
content-length:
- - '1653'
+ - '1736'
content-type:
- application/json
openai-organization:
- - pydantic-28gund
+ - user-grnwlxd1653lxdzp921aoihz
openai-processing-ms:
- - '1344'
+ - '1712'
openai-project:
- - proj_dKobscVY9YJxeEaDJen54e3d
+ - proj_FYsIItHHgnSPdHBVMzhNBWGa
openai-version:
- '2020-10-01'
strict-transport-security:
@@ -238,9 +123,11 @@ interactions:
- chunked
parsed_body:
background: false
- created_at: 1755169964
+ billing:
+ payer: developer
+ created_at: 1763595668
error: null
- id: resp_689dc4abe31c81968ed493d15d8810fe0afe80ec3d42722e
+ id: resp_0dcd74f01910b54500691e5594957481a0ac36dde76eca939f
incomplete_details: null
instructions: null
max_output_tokens: null
@@ -252,15 +139,16 @@ interactions:
- content:
- annotations: []
logprobs: []
- text: Tomorrow will be **Friday, August 15, 2025**.
+ text: Tomorrow is November 20, 2025.
type: output_text
- id: msg_689dc4acfa488196a6b1ec0ebd3bd9520afe80ec3d42722e
+ id: msg_0dcd74f01910b54500691e5596124081a087e8fa7b2ca19d5a
role: assistant
status: completed
type: message
parallel_tool_calls: true
previous_response_id: null
prompt_cache_key: null
+ prompt_cache_retention: null
reasoning:
effort: null
summary: null
@@ -275,8 +163,9 @@ interactions:
verbosity: medium
tool_choice: auto
tools:
- - search_context_size: medium
- type: web_search_preview
+ - filters: null
+ search_context_size: medium
+ type: web_search
user_location:
city: null
country: US
@@ -287,13 +176,13 @@ interactions:
top_p: 1.0
truncation: disabled
usage:
- input_tokens: 458
+ input_tokens: 329
input_tokens_details:
cached_tokens: 0
- output_tokens: 17
+ output_tokens: 12
output_tokens_details:
reasoning_tokens: 0
- total_tokens: 475
+ total_tokens: 341
user: null
status:
code: 200
diff --git a/tests/models/test_anthropic.py b/tests/models/test_anthropic.py
index 1a19af03e0..248889eb93 100644
--- a/tests/models/test_anthropic.py
+++ b/tests/models/test_anthropic.py
@@ -8,12 +8,12 @@
from datetime import timezone
from decimal import Decimal
from functools import cached_property
-from typing import Any, TypeVar, cast
+from typing import Annotated, Any, TypeVar, cast
import httpx
import pytest
from inline_snapshot import snapshot
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
from pydantic_ai import (
Agent,
@@ -52,7 +52,7 @@
BuiltinToolResultEvent, # pyright: ignore[reportDeprecated]
)
from pydantic_ai.models import ModelRequestParameters
-from pydantic_ai.output import NativeOutput, PromptedOutput, TextOutput, ToolOutput
+from pydantic_ai.output import PromptedOutput, TextOutput, ToolOutput
from pydantic_ai.result import RunUsage
from pydantic_ai.settings import ModelSettings
from pydantic_ai.usage import RequestUsage, UsageLimits
@@ -78,6 +78,7 @@
BetaMemoryTool20250818ViewCommand,
BetaMessage,
BetaMessageDeltaUsage,
+ BetaMessageTokensCount,
BetaRawContentBlockDeltaEvent,
BetaRawContentBlockStartEvent,
BetaRawContentBlockStopEvent,
@@ -181,10 +182,15 @@ async def messages_create(
self.index += 1
return response
- async def messages_count_tokens(self, *_args: Any, **_kwargs: Any) -> Any:
+ async def messages_count_tokens(self, *_args: Any, **kwargs: Any) -> BetaMessageTokensCount:
+ # check if we are configured to raise an exception
if self.messages_ is not None:
raise_if_exception(self.messages_ if not isinstance(self.messages_, Sequence) else self.messages_[0])
- return None # pragma: no cover
+
+ # record the kwargs used
+ self.chat_completion_kwargs.append({k: v for k, v in kwargs.items() if v is not NOT_GIVEN})
+
+ return BetaMessageTokensCount(input_tokens=10)
def completion_message(content: list[BetaContentBlock], usage: BetaUsage) -> BetaMessage:
@@ -594,6 +600,58 @@ def my_tool(value: str) -> str: # pragma: no cover
assert system[0]['cache_control'] == snapshot({'type': 'ephemeral', 'ttl': '5m'})
+async def test_anthropic_incompatible_schema_disables_auto_strict(allow_model_requests: None):
+ """Ensure strict mode is disabled when Anthropic cannot enforce the tool schema."""
+ c = completion_message(
+ [BetaTextBlock(text='Done', type='text')],
+ usage=BetaUsage(input_tokens=8, output_tokens=3),
+ )
+ mock_client = MockAnthropic.create_mock(c)
+ m = AnthropicModel('claude-haiku-4-5', provider=AnthropicProvider(anthropic_client=mock_client))
+ agent = Agent(m)
+
+ @agent.tool_plain
+ def constrained_tool(value: Annotated[str, Field(min_length=2)]) -> str: # pragma: no cover
+ return value
+
+ await agent.run('hello')
+
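+    # With `strict` unset and a strict-incompatible schema (min_length), the model should not set `strict` on the tool definition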
+ completion_kwargs = get_mock_chat_completion_kwargs(mock_client)[0]
+ assert 'strict' not in completion_kwargs['tools'][0]
+
+
+async def test_anthropic_mixed_strict_tool_run(allow_model_requests: None, anthropic_api_key: str):
+ """Exercise both strict=True and strict=False tool definitions against the live API."""
+ m = AnthropicModel('claude-sonnet-4-5', provider=AnthropicProvider(api_key=anthropic_api_key))
+ agent = Agent(
+ m,
+ system_prompt='Always call `country_source` first, then call `capital_lookup` with that result before replying.',
+ )
+
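+    # One tool opts into strict mode and the other opts out, so a single run exercises both variants against the API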
+ @agent.tool_plain(strict=True)
+ async def country_source() -> str:
+ return 'Japan'
+
+ capital_called = {'value': False}
+
+ @agent.tool_plain(strict=False)
+ async def capital_lookup(country: str) -> str:
+ capital_called['value'] = True
+ if country == 'Japan':
+ return 'Tokyo'
+ return f'Unknown capital for {country}' # pragma: no cover
+
+ result = await agent.run('Use the registered tools and respond exactly as `Capital: `.')
+ assert capital_called['value'] is True
+ assert result.output.startswith('Capital:')
+ assert any(
+ isinstance(part, ToolCallPart) and part.tool_name == 'capital_lookup'
+ for message in result.all_messages()
+ if isinstance(message, ModelResponse)
+ for part in message.parts
+ )
+
+
async def test_async_request_text_response(allow_model_requests: None):
c = completion_message(
[BetaTextBlock(text='world', type='text')],
@@ -4948,21 +5006,7 @@ async def test_anthropic_server_tool_pass_history_to_another_provider(
agent = Agent(anthropic_model, builtin_tools=[WebSearchTool()])
result = await agent.run('What day is today?')
- assert result.output == snapshot("""\
-Based on the search results, today is Thursday, August 14, 2025. Here are some additional details about the date:
-
-It is the 226th day of the year 2025 in the Gregorian calendar, with 139 days remaining until the end of the year.
-
-Some interesting observances for today include:
-It's being celebrated as:
-- Color Book Day
-- National Creamsicle Day
-- National Financial Awareness Day
-- National Navajo Code Talkers Day
-- National Tattoo Removal Day
-- National Wiffle Ball Day
-- Social Security Day\
-""")
+ assert result.output == snapshot('Today is November 19, 2025.')
result = await agent.run('What day is tomorrow?', model=openai_model, message_history=result.all_messages())
assert result.new_messages() == snapshot(
[
@@ -4973,16 +5017,16 @@ async def test_anthropic_server_tool_pass_history_to_another_provider(
ModelResponse(
parts=[
TextPart(
- content='Tomorrow will be **Friday, August 15, 2025**.',
- id='msg_689dc4acfa488196a6b1ec0ebd3bd9520afe80ec3d42722e',
+ content='Tomorrow is November 20, 2025.',
+ id='msg_0dcd74f01910b54500691e5596124081a087e8fa7b2ca19d5a',
)
],
- usage=RequestUsage(input_tokens=458, output_tokens=17, details={'reasoning_tokens': 0}),
+ usage=RequestUsage(input_tokens=329, output_tokens=12, details={'reasoning_tokens': 0}),
model_name='gpt-4.1-2025-04-14',
timestamp=IsDatetime(),
provider_name='openai',
provider_details={'finish_reason': 'completed'},
- provider_response_id='resp_689dc4abe31c81968ed493d15d8810fe0afe80ec3d42722e',
+ provider_response_id='resp_0dcd74f01910b54500691e5594957481a0ac36dde76eca939f',
finish_reason='stop',
run_id=IsStr(),
),
@@ -5393,19 +5437,6 @@ class CountryLanguage(BaseModel):
)
-async def test_anthropic_native_output(allow_model_requests: None, anthropic_api_key: str):
- m = AnthropicModel('claude-sonnet-4-5', provider=AnthropicProvider(api_key=anthropic_api_key))
-
- class CityLocation(BaseModel):
- city: str
- country: str
-
- agent = Agent(m, output_type=NativeOutput(CityLocation))
-
- with pytest.raises(UserError, match='Native structured output is not supported by this model.'):
- await agent.run('What is the largest city in the user country?')
-
-
async def test_anthropic_output_tool_with_thinking(allow_model_requests: None, anthropic_api_key: str):
m = AnthropicModel(
'claude-sonnet-4-0',
@@ -6523,6 +6554,37 @@ async def test_anthropic_model_usage_limit_not_exceeded(
)
+async def test_anthropic_count_tokens_with_mock(allow_model_requests: None):
+ """Test that count_tokens is called on the mock client."""
+ c = completion_message(
+ [BetaTextBlock(text='hello world', type='text')], BetaUsage(input_tokens=5, output_tokens=10)
+ )
+ mock_client = MockAnthropic.create_mock(c)
+ m = AnthropicModel('claude-haiku-4-5', provider=AnthropicProvider(anthropic_client=mock_client))
+ agent = Agent(m)
+
+ result = await agent.run('hello', usage_limits=UsageLimits(input_tokens_limit=20, count_tokens_before_request=True))
+ assert result.output == 'hello world'
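+    # Two recorded calls: the token count (count_tokens_before_request=True runs it before the request) and the message create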
+ assert len(mock_client.chat_completion_kwargs) == 2 # type: ignore
+ count_tokens_kwargs = mock_client.chat_completion_kwargs[0] # type: ignore
+ assert 'model' in count_tokens_kwargs
+ assert 'messages' in count_tokens_kwargs
+
+
+async def test_anthropic_count_tokens_with_no_messages(allow_model_requests: None):
+ """Test count_tokens when messages_ is None (no exception configured)."""
+ mock_client = cast(AsyncAnthropic, MockAnthropic())
+ m = AnthropicModel('claude-haiku-4-5', provider=AnthropicProvider(anthropic_client=mock_client))
+
+ result = await m.count_tokens(
+ [ModelRequest.user_text_prompt('hello')],
+ None,
+ ModelRequestParameters(),
+ )
+
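+    # The mock's messages_count_tokens stub returns BetaMessageTokensCount(input_tokens=10)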
+ assert result.input_tokens == 10
+
+
@pytest.mark.vcr()
async def test_anthropic_count_tokens_error(allow_model_requests: None, anthropic_api_key: str):
"""Test that errors convert to ModelHTTPError."""
diff --git a/tests/profiles/__init__.py b/tests/profiles/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/profiles/test_anthropic.py b/tests/profiles/test_anthropic.py
new file mode 100644
index 0000000000..115b718b51
--- /dev/null
+++ b/tests/profiles/test_anthropic.py
@@ -0,0 +1,289 @@
+"""Tests for Anthropic JSON schema transformer and strict compatibility detection.
+
+The AnthropicJsonSchemaTransformer checks whether schemas are 'lossless' - meaning
+Anthropic's SDK won't drop validation constraints during transformation to their
+structured output format.
+
+When constraints would be dropped (making the schema 'lossy'), `is_strict_compatible`
+is set to False. This prevents automatic use of strict mode, which would make
+server-side validation impossible since the constraints wouldn't be enforced.
+
+Key concepts:
+- **Lossless**: Schema constraints are fully preserved by Anthropic's transformer
+- **Lossy**: SDK drops constraints (e.g., minLength, pattern, minItems > 1)
+- **Strict compatible**: Schema can safely use strict=True for guaranteed validation
+
+See: https://docs.claude.com/en/docs/build-with-claude/structured-outputs
+"""
+
+from __future__ import annotations as _annotations
+
+from typing import Annotated
+
+import pytest
+from inline_snapshot import snapshot
+from pydantic import BaseModel, Field
+
+from ..conftest import try_import
+
+with try_import() as imports_successful:
+ from pydantic_ai.profiles.anthropic import AnthropicJsonSchemaTransformer
+
+pytestmark = [
+ pytest.mark.skipif(not imports_successful(), reason='anthropic not installed'),
+]
+
+
+def test_show_lossless_transform():
+ """Shows that a simple model without constraints is lossless."""
+
+ class Person(BaseModel):
+ name: str
+ age: int
+
+ strict = None
+ transformer = AnthropicJsonSchemaTransformer(Person.model_json_schema(), strict=strict)
+ transformed = transformer.walk()
+
+ assert transformer.is_strict_compatible is True
+ assert transformed == snapshot(
+ {
+ 'type': 'object',
+ 'properties': {'name': {'type': 'string'}, 'age': {'type': 'integer'}},
+ 'additionalProperties': False,
+ 'required': ['name', 'age'],
+ }
+ )
+
+
+def test_show_lossy_transform():
+ """Shows that a model with validation constraints is detected as lossy."""
+
+ class Person(BaseModel):
+ name: str = Field(min_length=3)
+ age: int
+
+ original_schema = Person.model_json_schema()
+ strict = True
+ transformer = AnthropicJsonSchemaTransformer(original_schema, strict=strict)
+ transformed = transformer.walk()
+ assert original_schema == snapshot(
+ {
+ 'properties': {
+ 'name': {'minLength': 3, 'title': 'Name', 'type': 'string'},
+ 'age': {'title': 'Age', 'type': 'integer'},
+ },
+ 'required': ['name', 'age'],
+ 'title': 'Person',
+ 'type': 'object',
+ }
+ )
+    # the schema is not strict-compatible, but we forced strict=True
+    assert transformer.is_strict_compatible is False
+    # anthropic's transform_schema moves constraints into the description
+ assert transformed == snapshot(
+ {
+ 'properties': {'name': {'type': 'string', 'description': '{minLength: 3}'}, 'age': {'type': 'integer'}},
+ 'required': ['name', 'age'],
+ 'additionalProperties': False,
+ 'type': 'object',
+ }
+ )
+
+
+def test_lossless_nested_model():
+ """Nested models without constraints are lossless."""
+
+ class Address(BaseModel):
+ street: str
+ city: str
+
+ class Person(BaseModel):
+ name: str
+ address: Address
+
+ original_schema = Person.model_json_schema()
+ assert original_schema == snapshot(
+ {
+ '$defs': {
+ 'Address': {
+ 'type': 'object',
+ 'properties': {
+ 'street': {'title': 'Street', 'type': 'string'},
+ 'city': {'title': 'City', 'type': 'string'},
+ },
+ 'required': ['street', 'city'],
+ 'title': 'Address',
+ }
+ },
+ 'properties': {'name': {'title': 'Name', 'type': 'string'}, 'address': {'$ref': '#/$defs/Address'}},
+ 'required': ['name', 'address'],
+ 'title': 'Person',
+ 'type': 'object',
+ }
+ )
+ # strict=True forces transformation
+ strict = True
+ transformer = AnthropicJsonSchemaTransformer(original_schema, strict=strict)
+ transformed = transformer.walk()
+
+ assert transformer.is_strict_compatible is True
+ assert transformed == snapshot(
+ {
+ '$defs': {
+ 'Address': {
+ 'type': 'object',
+ 'properties': {'street': {'type': 'string'}, 'city': {'type': 'string'}},
+ 'additionalProperties': False,
+ 'required': ['street', 'city'],
+ }
+ },
+ 'type': 'object',
+ 'properties': {'name': {'type': 'string'}, 'address': {'$ref': '#/$defs/Address'}},
+ 'additionalProperties': False,
+ 'required': ['name', 'address'],
+ }
+ )
+
+
+def test_lossy_string_constraints():
+ """String with min_length constraint are lossy."""
+
+ class User(BaseModel):
+ username: Annotated[str, Field(min_length=3)]
+
+ original_schema = User.model_json_schema()
+ strict = None
+ transformer = AnthropicJsonSchemaTransformer(original_schema, strict=strict)
+ transformed = transformer.walk()
+
+ # SDK drops minLength, making it lossy
+ assert transformer.is_strict_compatible is False
+
+ # Original schema has minLength constraint
+ assert original_schema == snapshot(
+ {
+ 'properties': {'username': {'minLength': 3, 'title': 'Username', 'type': 'string'}},
+ 'required': ['username'],
+ 'title': 'User',
+ 'type': 'object',
+ }
+ )
+
+    # With strict left unset, the strict transformation is not applied, so minLength is preserved in the output
+ assert transformed == snapshot(
+ {'type': 'object', 'properties': {'username': {'minLength': 3, 'type': 'string'}}, 'required': ['username']}
+ )
+
+
+def test_lossy_number_constraints():
+ """Number with minimum constraint should be lossy (constraint gets dropped)."""
+
+ class Product(BaseModel):
+ price: Annotated[float, Field(ge=0)]
+
+ strict = None
+ transformer = AnthropicJsonSchemaTransformer(Product.model_json_schema(), strict=strict)
+ transformed = transformer.walk()
+
+ # SDK drops minimum, making it lossy
+ assert transformer.is_strict_compatible is False
+    # With strict left unset, the strict transformation is not applied, so the minimum constraint is preserved in the output
+ assert transformed == snapshot(
+ {'type': 'object', 'properties': {'price': {'minimum': 0, 'type': 'number'}}, 'required': ['price']}
+ )
+
+
+def test_lossy_pattern_constraint():
+ """String with pattern constraint should be lossy (constraint gets dropped)."""
+
+ class Email(BaseModel):
+ address: Annotated[str, Field(pattern=r'^[\w\.-]+@[\w\.-]+\.\w+$')]
+
+ original_schema = Email.model_json_schema()
+ strict = None
+ transformer = AnthropicJsonSchemaTransformer(original_schema, strict=strict)
+ transformed = transformer.walk()
+
+ # SDK drops pattern, making it lossy
+ assert transformer.is_strict_compatible is False
+
+ # Original schema has pattern constraint
+ assert original_schema == snapshot(
+ {
+ 'properties': {
+ 'address': {'pattern': '^[\\w\\.-]+@[\\w\\.-]+\\.\\w+$', 'title': 'Address', 'type': 'string'}
+ },
+ 'required': ['address'],
+ 'title': 'Email',
+ 'type': 'object',
+ }
+ )
+
+    # With strict left unset, the strict transformation is not applied, so the pattern constraint is preserved in the output
+ assert transformed == snapshot(
+ {
+ 'type': 'object',
+ 'properties': {'address': {'pattern': '^[\\w\\.-]+@[\\w\\.-]+\\.\\w+$', 'type': 'string'}},
+ 'required': ['address'],
+ }
+ )
+
+
+def test_strict_false_no_transformation():
+ """When strict=False, no transformation is applied."""
+
+ class User(BaseModel):
+ username: Annotated[str, Field(min_length=3)]
+
+ strict = False
+ transformer = AnthropicJsonSchemaTransformer(User.model_json_schema(), strict=strict)
+ transformed = transformer.walk()
+
+    # `'minLength': 3` shows the constraint survives untouched when strict=False
+ assert transformed == snapshot(
+ {'type': 'object', 'properties': {'username': {'minLength': 3, 'type': 'string'}}, 'required': ['username']}
+ )
+
+
+def test_lossy_array_items_with_constraints():
+ """Detect lossy changes in array items with inline validation constraints."""
+
+ class StringList(BaseModel):
+ items: list[Annotated[str, Field(min_length=1)]]
+
+ original_schema = StringList.model_json_schema()
+ strict = None
+ transformer = AnthropicJsonSchemaTransformer(original_schema, strict=strict)
+ transformed = transformer.walk()
+
+    # Array items have an inline minLength constraint that strict mode would drop
+ assert transformer.is_strict_compatible is False
+ assert transformed == snapshot(
+ {
+ 'type': 'object',
+            # `'minLength': 1` shows the item constraint survives untouched
+ 'properties': {'items': {'type': 'array', 'items': {'minLength': 1, 'type': 'string'}}},
+ 'required': ['items'],
+ }
+ )
+
+
+def test_lossy_schema_with_defs():
+ """Detect lossy changes in schemas using $defs with validation constraints."""
+
+ class UserProfile(BaseModel):
+ name: Annotated[str, Field(min_length=3)]
+ age: int
+
+ class Account(BaseModel):
+ profile: UserProfile
+ backup_profile: UserProfile | None = None
+
+ original_schema = Account.model_json_schema()
+ strict = None
+ transformer = AnthropicJsonSchemaTransformer(original_schema, strict=strict)
+ transformer.walk()
+
+ # UserProfile is in $defs with minLength constraint which gets dropped
+ assert transformer.is_strict_compatible is False
diff --git a/tests/providers/test_openrouter.py b/tests/providers/test_openrouter.py
index 400c789fb0..ffd54bdab5 100644
--- a/tests/providers/test_openrouter.py
+++ b/tests/providers/test_openrouter.py
@@ -9,7 +9,7 @@
from pydantic_ai.agent import Agent
from pydantic_ai.exceptions import UserError
from pydantic_ai.profiles.amazon import amazon_model_profile
-from pydantic_ai.profiles.anthropic import anthropic_model_profile
+from pydantic_ai.profiles.anthropic import AnthropicJsonSchemaTransformer, anthropic_model_profile
from pydantic_ai.profiles.cohere import cohere_model_profile
from pydantic_ai.profiles.deepseek import deepseek_model_profile
from pydantic_ai.profiles.google import GoogleJsonSchemaTransformer, google_model_profile
@@ -124,7 +124,7 @@ def test_openrouter_provider_model_profile(mocker: MockerFixture):
anthropic_profile = provider.model_profile('anthropic/claude-3.5-sonnet')
anthropic_model_profile_mock.assert_called_with('claude-3.5-sonnet')
assert anthropic_profile is not None
- assert anthropic_profile.json_schema_transformer == OpenAIJsonSchemaTransformer
+ assert anthropic_profile.json_schema_transformer == AnthropicJsonSchemaTransformer
mistral_profile = provider.model_profile('mistralai/mistral-large-2407')
mistral_model_profile_mock.assert_called_with('mistral-large-2407')
diff --git a/tests/providers/test_vercel.py b/tests/providers/test_vercel.py
index 91c26f9c8c..3e60456cdb 100644
--- a/tests/providers/test_vercel.py
+++ b/tests/providers/test_vercel.py
@@ -7,7 +7,7 @@
from pydantic_ai._json_schema import InlineDefsJsonSchemaTransformer
from pydantic_ai.exceptions import UserError
from pydantic_ai.profiles.amazon import amazon_model_profile
-from pydantic_ai.profiles.anthropic import anthropic_model_profile
+from pydantic_ai.profiles.anthropic import AnthropicJsonSchemaTransformer, anthropic_model_profile
from pydantic_ai.profiles.cohere import cohere_model_profile
from pydantic_ai.profiles.deepseek import deepseek_model_profile
from pydantic_ai.profiles.google import GoogleJsonSchemaTransformer, google_model_profile
@@ -82,7 +82,7 @@ def test_vercel_provider_model_profile(mocker: MockerFixture):
profile = provider.model_profile('anthropic/claude-sonnet-4-5')
anthropic_mock.assert_called_with('claude-sonnet-4-5')
assert profile is not None
- assert profile.json_schema_transformer == OpenAIJsonSchemaTransformer
+ assert profile.json_schema_transformer == AnthropicJsonSchemaTransformer
# Test bedrock provider
profile = provider.model_profile('bedrock/anthropic.claude-sonnet-4-5')
diff --git a/uv.lock b/uv.lock
index f603af183f..84bfbbecf7 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1,5 +1,5 @@
version = 1
-revision = 3
+revision = 2
requires-python = ">=3.10"
resolution-markers = [
"python_full_version >= '3.13' and platform_python_implementation == 'PyPy'",
@@ -221,7 +221,7 @@ wheels = [
[[package]]
name = "anthropic"
-version = "0.70.0"
+version = "0.74.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
@@ -233,9 +233,9 @@ dependencies = [
{ name = "sniffio" },
{ name = "typing-extensions" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/1b/be/a80a8678d39d77b2325b1a32a55d62ca9dc376984a3d66d351229d37da9c/anthropic-0.70.0.tar.gz", hash = "sha256:24078275246636d9fd38c94bb8cf64799ce7fc6bbad379422b36fa86b3e4deee", size = 480930, upload-time = "2025-10-15T16:54:33.577Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/5b/f9/baa1b885c8664b446e6a13003938046901e54ffd70b532bbebd01256e34b/anthropic-0.74.0.tar.gz", hash = "sha256:114ec10cb394b6764e199da06335da4747b019c5629e53add33572f66964ad99", size = 428958, upload-time = "2025-11-18T15:29:47.579Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/a3/81/da287ba25b9f8a16d27e822b3f2dad6ddf005fba3e3696f5dce818383850/anthropic-0.70.0-py3-none-any.whl", hash = "sha256:fa7d0dee6f2b871faa7cd0b77f6047e8006d5863618804204cf34b1b95819971", size = 337327, upload-time = "2025-10-15T16:54:32.087Z" },
+ { url = "https://files.pythonhosted.org/packages/61/27/8c404b290ec650e634eacc674df943913722ec21097b0476d68458250c2f/anthropic-0.74.0-py3-none-any.whl", hash = "sha256:df29b8dfcdbd2751fa31177f643d8d8f66c5315fe06bdc42f9139e9f00d181d5", size = 371474, upload-time = "2025-11-18T15:29:45.748Z" },
]
[[package]]
@@ -2763,6 +2763,7 @@ version = "0.7.30"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/bf/38/d1ef3ae08d8d857e5e0690c5b1e07bf7eb4a1cae5881d87215826dc6cadb/llguidance-0.7.30.tar.gz", hash = "sha256:e93bf75f2b6e48afb86a5cee23038746975e1654672bf5ba0ae75f7d4d4a2248", size = 1055528, upload-time = "2025-06-23T00:23:49.247Z" }
wheels = [
+ { url = "https://files.pythonhosted.org/packages/b3/e1/694c89986fcae7777184fc8b22baa0976eba15a6847221763f6ad211fc1f/llguidance-0.7.30-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:c80af02c118d2b0526bcecaab389af2ed094537a069b0fc724cd2a2f2ba3990f", size = 3327974, upload-time = "2025-06-23T00:23:47.556Z" },
{ url = "https://files.pythonhosted.org/packages/fd/77/ab7a548ae189dc23900fdd37803c115c2339b1223af9e8eb1f4329b5935a/llguidance-0.7.30-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:00a256d532911d2cf5ba4ef63e182944e767dd2402f38d63002016bc37755958", size = 3210709, upload-time = "2025-06-23T00:23:45.872Z" },
{ url = "https://files.pythonhosted.org/packages/9c/5b/6a166564b14f9f805f0ea01ec233a84f55789cb7eeffe1d6224ccd0e6cdd/llguidance-0.7.30-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af8741c867e4bc7e42f7cdc68350c076b4edd0ca10ecefbde75f15a9f6bc25d0", size = 14867038, upload-time = "2025-06-23T00:23:39.571Z" },
{ url = "https://files.pythonhosted.org/packages/af/80/5a40b9689f17612434b820854cba9b8cabd5142072c491b5280fe5f7a35e/llguidance-0.7.30-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9edc409b9decd6cffba5f5bf3b4fbd7541f95daa8cbc9510cbf96c6ab1ffc153", size = 15004926, upload-time = "2025-06-23T00:23:43.965Z" },
@@ -5658,7 +5659,7 @@ vertexai = [
[package.metadata]
requires-dist = [
{ name = "ag-ui-protocol", marker = "extra == 'ag-ui'", specifier = ">=0.1.8" },
- { name = "anthropic", marker = "extra == 'anthropic'", specifier = ">=0.70.0" },
+ { name = "anthropic", marker = "extra == 'anthropic'", specifier = ">=0.74.0" },
{ name = "argcomplete", marker = "extra == 'cli'", specifier = ">=3.5.0" },
{ name = "boto3", marker = "extra == 'bedrock'", specifier = ">=1.40.14" },
{ name = "cohere", marker = "sys_platform != 'emscripten' and extra == 'cohere'", specifier = ">=5.18.0" },
@@ -8591,14 +8592,17 @@ dependencies = [
]
sdist = { url = "https://files.pythonhosted.org/packages/f2/a9/dc3c63cf7f082d183711e46ef34d10d8a135c2319dc581905d79449f52ea/xgrammar-0.1.25.tar.gz", hash = "sha256:70ce16b27e8082f20808ed759b0733304316facc421656f0f30cfce514b5b77a", size = 2297187, upload-time = "2025-09-21T05:58:58.942Z" }
wheels = [
+ { url = "https://files.pythonhosted.org/packages/c0/b4/8f78b56ebf64f161258f339cc5898bf761b4fb6c6805d0bca1bcaaaef4a1/xgrammar-0.1.25-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:d12d1078ee2b5c1531610489b433b77694a7786210ceb2c0c1c1eb058e9053c7", size = 679074, upload-time = "2025-09-21T05:58:20.344Z" },
{ url = "https://files.pythonhosted.org/packages/52/38/b57120b73adcd342ef974bff14b2b584e7c47edf28d91419cb9325fd5ef2/xgrammar-0.1.25-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c2e940541b7cddf3ef55a70f20d4c872af7f0d900bc0ed36f434bf7212e2e729", size = 622668, upload-time = "2025-09-21T05:58:22.269Z" },
{ url = "https://files.pythonhosted.org/packages/19/8d/64430d01c21ca2b1d8c5a1ed47c90f8ac43717beafc9440d01d81acd5cfc/xgrammar-0.1.25-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2063e1c72f0c00f47ac8ce7ce0fcbff6fa77f79012e063369683844e2570c266", size = 8517569, upload-time = "2025-09-21T05:58:23.77Z" },
{ url = "https://files.pythonhosted.org/packages/b1/c4/137d0e9cd038ff4141752c509dbeea0ec5093eb80815620c01b1f1c26d0a/xgrammar-0.1.25-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9785eafa251c996ebaa441f3b8a6c037538930104e265a64a013da0e6fd2ad86", size = 8709188, upload-time = "2025-09-21T05:58:26.246Z" },
{ url = "https://files.pythonhosted.org/packages/6c/3d/c228c470d50865c9db3fb1e75a95449d0183a8248519b89e86dc481d6078/xgrammar-0.1.25-cp310-cp310-win_amd64.whl", hash = "sha256:42ecefd020038b3919a473fe5b9bb9d8d809717b8689a736b81617dec4acc59b", size = 698919, upload-time = "2025-09-21T05:58:28.368Z" },
+ { url = "https://files.pythonhosted.org/packages/9e/b7/ca0ff7c91f24b2302e94b0e6c2a234cc5752b10da51eb937e7f2aa257fde/xgrammar-0.1.25-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:27d7ac4be05cf9aa258c109a8647092ae47cb1e28df7d27caced6ab44b72b799", size = 678801, upload-time = "2025-09-21T05:58:29.936Z" },
{ url = "https://files.pythonhosted.org/packages/43/cd/fdf4fb1b5f9c301d381656a600ad95255a76fa68132978af6f06e50a46e1/xgrammar-0.1.25-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:151c1636188bc8c5cdf318cefc5ba23221c9c8cc07cb392317fb3f7635428150", size = 622565, upload-time = "2025-09-21T05:58:31.185Z" },
{ url = "https://files.pythonhosted.org/packages/55/04/55a87e814bcab771d3e4159281fa382b3d5f14a36114f2f9e572728da831/xgrammar-0.1.25-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35fc135650aa204bf84db7fe9c0c0f480b6b11419fe47d89f4bd21602ac33be9", size = 8517238, upload-time = "2025-09-21T05:58:32.835Z" },
{ url = "https://files.pythonhosted.org/packages/31/f6/3c5210bc41b61fb32b66bf5c9fd8ec5edacfeddf9860e95baa9caa9a2c82/xgrammar-0.1.25-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc19d6d7e8e51b6c9a266e949ac7fb3d2992447efeec7df32cca109149afac18", size = 8709514, upload-time = "2025-09-21T05:58:34.727Z" },
{ url = "https://files.pythonhosted.org/packages/21/de/85714f307536b328cc16cc6755151865e8875378c8557c15447ca07dff98/xgrammar-0.1.25-cp311-cp311-win_amd64.whl", hash = "sha256:8fcb24f5a7acd5876165c50bd51ce4bf8e6ff897344a5086be92d1fe6695f7fe", size = 698722, upload-time = "2025-09-21T05:58:36.411Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/d7/a7bdb158afa88af7e6e0d312e9677ba5fb5e423932008c9aa2c45af75d5d/xgrammar-0.1.25-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:96500d7578c46e8551253b9211b02e02f54e147bc290479a64717d80dcf4f7e3", size = 678250, upload-time = "2025-09-21T05:58:37.936Z" },
{ url = "https://files.pythonhosted.org/packages/10/9d/b20588a3209d544a3432ebfcf2e3b1a455833ee658149b08c18eef0c6f59/xgrammar-0.1.25-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:73ba9031e359447af53ce89dfb0775e7b9f4b358d513bcc28a6b4deace661dd5", size = 621550, upload-time = "2025-09-21T05:58:39.464Z" },
{ url = "https://files.pythonhosted.org/packages/99/9c/39bb38680be3b6d6aa11b8a46a69fb43e2537d6728710b299fa9fc231ff0/xgrammar-0.1.25-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c519518ebc65f75053123baaf23776a21bda58f64101a64c2fc4aa467c9cd480", size = 8519097, upload-time = "2025-09-21T05:58:40.831Z" },
{ url = "https://files.pythonhosted.org/packages/c6/c2/695797afa9922c30c45aa94e087ad33a9d87843f269461b622a65a39022a/xgrammar-0.1.25-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47fdbfc6007df47de2142613220292023e88e4a570546b39591f053e4d9ec33f", size = 8712184, upload-time = "2025-09-21T05:58:43.142Z" },