diff --git a/pyproject.toml b/pyproject.toml index a6c52299..2ff3afbc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,8 +26,8 @@ classifiers = [ "Topic :: Software Development :: Libraries :: Python Modules", ] dependencies = [ - "boto3>=1.42.86", - "botocore>=1.42.86", + "boto3>=1.43.0", + "botocore>=1.43.0", "pydantic>=2.0.0,<2.41.3", "urllib3>=1.26.0", "starlette>=0.46.2", @@ -167,3 +167,7 @@ strands-agents = [ strands-agents-evals = [ "strands-agents-evals>=0.1.0" ] +simulation = [ + "jinja2>=3.1.0", + "strands-agents-evals>=0.1.0", +] diff --git a/src/bedrock_agentcore/_utils/endpoints.py b/src/bedrock_agentcore/_utils/endpoints.py index a8e8f528..9f37fba9 100644 --- a/src/bedrock_agentcore/_utils/endpoints.py +++ b/src/bedrock_agentcore/_utils/endpoints.py @@ -7,7 +7,7 @@ # Environment-configurable constants with fallback defaults DP_ENDPOINT_OVERRIDE = os.getenv("BEDROCK_AGENTCORE_DP_ENDPOINT") CP_ENDPOINT_OVERRIDE = os.getenv("BEDROCK_AGENTCORE_CP_ENDPOINT") -DEFAULT_REGION = os.getenv("AWS_REGION", "us-west-2") +DEFAULT_REGION = os.getenv("AWS_REGION") or os.getenv("AWS_DEFAULT_REGION") or "us-west-2" # Regex for valid AWS region names (e.g., us-east-1, eu-west-2, cn-north-1, us-gov-west-1). # Uses \A and \Z anchors to prevent newline injection bypass that $ allows. 
diff --git a/src/bedrock_agentcore/config_bundle/__init__.py b/src/bedrock_agentcore/config_bundle/__init__.py new file mode 100644 index 00000000..6432f4a6 --- /dev/null +++ b/src/bedrock_agentcore/config_bundle/__init__.py @@ -0,0 +1,9 @@ +"""Configuration bundle support for BedrockAgentCore.""" + +from .bundle import ConfigBundleRef +from .client import ConfigBundleClient + +__all__ = [ + "ConfigBundleRef", + "ConfigBundleClient", +] diff --git a/src/bedrock_agentcore/config_bundle/baggage.py b/src/bedrock_agentcore/config_bundle/baggage.py new file mode 100644 index 00000000..9db2faed --- /dev/null +++ b/src/bedrock_agentcore/config_bundle/baggage.py @@ -0,0 +1,97 @@ +"""Parse W3C baggage headers for configuration bundle references.""" + +import logging +from typing import Any, Dict, List, Optional +from urllib.parse import unquote + +from .bundle import ConfigBundleRef + +BAGGAGE_HEADER = "baggage" + +logger = logging.getLogger(__name__) + + +def _extract_baggage(headers: Any) -> Dict[str, List[str]]: + """Extract all W3C baggage entries from request headers into a multi-value dict. + + Args: + headers: A Starlette ``Headers`` object or a list of ``(name, value)`` + tuples. Must preserve duplicate header names as separate entries so + that multiple ``baggage`` headers are each processed independently. + A plain ``dict`` is not suitable — it can only hold one ``baggage`` + entry and will silently drop the rest. + + Returns: + A dict mapping each baggage key to a list of its decoded values in the + order they were encountered. A key that appears in more than one + ``baggage`` header, or more than once within a single header value, + accumulates one entry per occurrence. + + Notes: + - Header name matching is case-insensitive (``Baggage`` == ``baggage``). + - Per-entry properties (the ``;property=value`` suffix) are stripped + before the value is returned. + - Values are percent-decoded (``%XX`` → character). 
+ - Entries with no ``=`` sign, an empty key, or an empty value are skipped. + """ + result: Dict[str, List[str]] = {} + items = headers.items() if hasattr(headers, "items") else headers + for key, value in items: + if key.lower() != BAGGAGE_HEADER: + continue + for item in value.split(","): + item = item.strip() + if not item: + continue + if "=" not in item: + logger.warning("Skipping malformed baggage entry (no '='): %r", item) + continue + entry_key, _, entry_value = item.partition("=") + entry_key = entry_key.strip() + if not entry_key: + logger.warning("Skipping baggage entry with empty key in: %r", item) + continue + decoded_value = unquote(entry_value.split(";")[0].strip()) + if not decoded_value: + logger.warning("Skipping baggage entry with empty value for key %r", entry_key) + continue + result.setdefault(entry_key, []).append(decoded_value) + return result + + +def _parse_config_bundle_baggage(all_baggage: Dict[str, List[str]]) -> Optional[ConfigBundleRef]: + """Build a ``ConfigBundleRef`` from extracted baggage entries, or ``None`` if absent. + + Expects ``all_baggage`` to have been produced by :func:`_extract_baggage`. + The two keys used are: + + - ``aws.agentcore.configbundle_arn`` — full ARN of the configuration bundle + - ``aws.agentcore.configbundle_version`` — version ID of the bundle + + Only a single bundle is supported:: + + baggage: aws.agentcore.configbundle_arn=<arn>,aws.agentcore.configbundle_version=<version> + + If multiple values are present for either key, only the first is used and a + warning is logged. + + Args: + all_baggage: Multi-value baggage dict from :func:`_extract_baggage`. + + Returns: + A ``ConfigBundleRef`` when both keys are present and valid, otherwise ``None``. 
+ """ + arns = all_baggage.get("aws.agentcore.configbundle_arn", []) + versions = all_baggage.get("aws.agentcore.configbundle_version", []) + + if not arns or not versions: + return None + + if len(arns) > 1 or len(versions) > 1: + logger.warning("Multiple config bundle ARNs/versions found in baggage — only the first will be used") + + try: + return ConfigBundleRef(bundle_arn=arns[0], bundle_version=versions[0]) + except ValueError as e: + logger.warning("Skipping invalid config bundle ref (arn=%r, version=%r): %s", arns[0], versions[0], e) + return None diff --git a/src/bedrock_agentcore/config_bundle/bundle.py b/src/bedrock_agentcore/config_bundle/bundle.py new file mode 100644 index 00000000..85bb17eb --- /dev/null +++ b/src/bedrock_agentcore/config_bundle/bundle.py @@ -0,0 +1,34 @@ +"""Configuration bundle reference model.""" + +from dataclasses import dataclass +from typing import Any, Dict + +# ComponentConfigurationMap value: {componentId: {"configuration": }} +ConfigBundleComponents = Dict[str, Dict[str, Any]] + + +@dataclass(frozen=True) +class ConfigBundleRef: + """Lightweight reference to a configuration bundle version, parsed from OTEL baggage. + + .. warning:: + This feature is in preview and may change in future releases. 
+ """ + + bundle_arn: str + bundle_version: str + + def __post_init__(self) -> None: + """Validate bundle ARN and version.""" + if not self.bundle_arn: + raise ValueError("bundle_arn must not be empty") + if not self.bundle_version: + raise ValueError("bundle_version must not be empty") + parts = self.bundle_arn.rsplit("/", 1) + if len(parts) != 2 or not parts[1]: + raise ValueError(f"bundle_arn does not contain a valid bundle ID segment: {self.bundle_arn!r}") + + @property + def bundle_id(self) -> str: + """Extract bundle ID from ARN (last path segment after '/').""" + return self.bundle_arn.rsplit("/", 1)[-1] diff --git a/src/bedrock_agentcore/config_bundle/client.py b/src/bedrock_agentcore/config_bundle/client.py new file mode 100644 index 00000000..35ca6fcf --- /dev/null +++ b/src/bedrock_agentcore/config_bundle/client.py @@ -0,0 +1,75 @@ +"""Client for fetching configuration bundle versions from the AgentCore control plane.""" + +import logging +import threading +from typing import Optional + +import boto3 + +from .._utils.endpoints import DEFAULT_REGION, get_control_plane_endpoint + +logger = logging.getLogger(__name__) + +_ALLOWED_OPERATIONS = frozenset( + { + "create_configuration_bundle", + "delete_configuration_bundle", + "get_configuration_bundle", + "get_configuration_bundle_version", + "list_configuration_bundle_versions", + "list_configuration_bundles", + "update_configuration_bundle", + } +) + + +class ConfigBundleClient: + """Client for AgentCore configuration bundle operations. + + .. warning:: + This feature is in preview and may change in future releases. + + Wraps the ``bedrock-agentcore-control`` boto3 client and forwards all method + calls to it via ``__getattr__``, so any boto3 operation (e.g. + ``get_configuration_bundle_version``, ``list_configuration_bundles``) is + available without explicit definitions. + + Intended to be created once at application startup and reused across requests. 
+ The underlying boto3 client is created lazily on first use so that agents + which never receive config bundle baggage incur no startup overhead. + """ + + def __init__(self, region_name: Optional[str] = None, boto3_session: Optional[boto3.Session] = None): + """Initialise the client with an optional region and boto3 session.""" + self._region = region_name or DEFAULT_REGION + self._boto3_session = boto3_session + self._client = None + self._client_lock = threading.Lock() + + def _get_client(self): + # Use __dict__ directly to avoid triggering __getattr__ if _client is + # not yet set (e.g. during unpickling before __init__ completes). + if self.__dict__.get("_client") is None: + with self._client_lock: + if self.__dict__.get("_client") is None: + session = self._boto3_session or boto3.Session() + self._client = session.client( + "bedrock-agentcore-control", + region_name=self._region, + endpoint_url=get_control_plane_endpoint(self._region), + ) + return self._client + + def __getattr__(self, name: str): + """Forward configuration bundle method calls to the underlying boto3 client. + + Only operations in ``_ALLOWED_OPERATIONS`` are exposed. Attempts to call + any other operation raise ``AttributeError``. + + Uses ``object.__getattribute__`` to access ``_get_client`` so that if Python + looks up dunder attributes during unpickling or deepcopy before instance + attributes are initialised, this method does not recurse into itself. 
+ """ + if name not in _ALLOWED_OPERATIONS: + raise AttributeError(f"'{type(self).__name__}' does not expose operation '{name}'") + return getattr(object.__getattribute__(self, "_get_client")(), name) diff --git a/src/bedrock_agentcore/evaluation/__init__.py b/src/bedrock_agentcore/evaluation/__init__.py index ddcf7b07..57b0c277 100644 --- a/src/bedrock_agentcore/evaluation/__init__.py +++ b/src/bedrock_agentcore/evaluation/__init__.py @@ -6,15 +6,31 @@ EvaluatorOutput, custom_code_based_evaluator, ) +from bedrock_agentcore.evaluation.runner.batch.batch_evaluation_models import ( + BatchEvaluationResult, + BatchEvaluationRunConfig, + BatchEvaluationSummary, + BatchEvaluatorConfig, + CloudWatchDataSourceConfig, + CloudWatchOutputDataConfig, + EvaluatorStatistics, + EvaluatorSummary, + FailedScenario, +) +from bedrock_agentcore.evaluation.runner.batch.batch_evaluation_runner import ( + BatchEvaluationRunner, +) from bedrock_agentcore.evaluation.runner.dataset_providers import ( DatasetProvider, FileDatasetProvider, ) from bedrock_agentcore.evaluation.runner.dataset_types import ( + ActorProfile, Dataset, Input, PredefinedScenario, Scenario, + SimulatedScenario, Turn, ) from bedrock_agentcore.evaluation.runner.invoker_types import ( @@ -34,6 +50,8 @@ ScenarioExecutionResult, ScenarioExecutor, ScenarioResult, + SimulatedScenarioExecutor, + SimulationConfig, ) from bedrock_agentcore.evaluation.span_to_adot_serializer import ( convert_strands_to_adot, @@ -43,7 +61,18 @@ ) __all__ = [ + "ActorProfile", "AgentInvokerFn", + "BatchEvaluationRunner", + "BatchEvaluationResult", + "BatchEvaluationRunConfig", + "CloudWatchOutputDataConfig", + "CloudWatchDataSourceConfig", + "BatchEvaluatorConfig", + "BatchEvaluationSummary", + "EvaluatorStatistics", + "EvaluatorSummary", + "FailedScenario", "AgentInvokerInput", "AgentInvokerOutput", "CloudWatchAgentSpanCollector", @@ -65,10 +94,13 @@ "ScenarioExecutor", "ScenarioResult", "AgentSpanCollector", + "SimulationConfig", 
"StrandsEvalsAgentCoreEvaluator", "Turn", "PredefinedScenario", "PredefinedScenarioExecutor", + "SimulatedScenario", + "SimulatedScenarioExecutor", "custom_code_based_evaluator", "convert_strands_to_adot", "create_strands_evaluator", diff --git a/src/bedrock_agentcore/evaluation/runner/__init__.py b/src/bedrock_agentcore/evaluation/runner/__init__.py index b047028e..cb380357 100644 --- a/src/bedrock_agentcore/evaluation/runner/__init__.py +++ b/src/bedrock_agentcore/evaluation/runner/__init__.py @@ -2,10 +2,12 @@ from .dataset_providers import DatasetProvider, FileDatasetProvider from .dataset_types import ( + ActorProfile, Dataset, Input, PredefinedScenario, Scenario, + SimulatedScenario, Turn, ) from .invoker_types import ( @@ -22,33 +24,43 @@ EvaluatorResult, OnDemandEvaluationDatasetRunner, ScenarioResult, + SimulationConfig, ) from .scenario_executor import ( + AgentCoreActorSimulator, PredefinedScenarioExecutor, ScenarioExecutionResult, ScenarioExecutor, + SimulatedScenarioExecutor, + SimulatorResult, ) __all__ = [ + "ActorProfile", "AgentInvokerFn", "AgentInvokerInput", "AgentInvokerOutput", + "AgentSpanCollector", "CloudWatchAgentSpanCollector", "Dataset", "DatasetProvider", "EvaluationResult", "EvaluationRunConfig", - "OnDemandEvaluationDatasetRunner", "EvaluatorConfig", "EvaluatorResult", "FileDatasetProvider", "Input", + "OnDemandEvaluationDatasetRunner", + "PredefinedScenario", + "AgentCoreActorSimulator", + "PredefinedScenarioExecutor", "Scenario", "ScenarioExecutionResult", "ScenarioExecutor", + "SimulatorResult", "ScenarioResult", - "AgentSpanCollector", + "SimulatedScenario", + "SimulatedScenarioExecutor", + "SimulationConfig", "Turn", - "PredefinedScenario", - "PredefinedScenarioExecutor", ] diff --git a/src/bedrock_agentcore/evaluation/runner/batch/batch_evaluation_models.py b/src/bedrock_agentcore/evaluation/runner/batch/batch_evaluation_models.py new file mode 100644 index 00000000..3e5b93b1 --- /dev/null +++ 
b/src/bedrock_agentcore/evaluation/runner/batch/batch_evaluation_models.py @@ -0,0 +1,307 @@ +"""Data models for batch evaluation: session source configs, evaluator config, and results.""" + +import logging +import time +from abc import ABC, abstractmethod +from datetime import datetime +from typing import Any, Dict, List, Optional + +from pydantic import BaseModel, ConfigDict, Field, alias_generators, model_validator + +from bedrock_agentcore.evaluation.runner.dataset_types import SimulationConfig + +logger = logging.getLogger(__name__) + + +# --------------------------------------------------------------------------- +# Data source configs +# --------------------------------------------------------------------------- + + +class DataSourceConfig(ABC): + """Abstract base for session span sources passed to the evaluation API. + + .. warning:: + This feature is in preview and may change in future releases. + + Subclass this to support any DataSourceConfig union member + (cloudWatchLogs or future additions). + """ + + def pre_evaluation_run_hook(self) -> None: + """Called by the runner after agent invocation, before the evaluation API call. + + Override to add source-specific pre-run behavior such as waiting for + span ingestion or validating that spans are available. + + Note: + Implementations may block the calling thread (e.g. to wait for + CloudWatch ingestion). The runner invokes this synchronously, so + long-running hooks will delay the evaluation API call by the full + duration of the hook. + """ + return None + + @abstractmethod + def to_data_source_config( + self, + session_ids: List[str], + start_time: datetime, + end_time: datetime, + ) -> Dict[str, Any]: + """Return the dataSourceConfig dict for the evaluation API call. + + The runner always provides all three arguments after agent invocation. + Implementations use what they need and ignore the rest. + + Args: + session_ids: Session IDs generated during agent invocation. 
+ start_time: Earliest session start time across all invocations. + end_time: Latest session end time across all invocations. + + Returns: + Dict matching one member of the DataSourceConfig union. + """ + + +class CloudWatchDataSourceConfig(BaseModel, DataSourceConfig): + """CloudWatch data source — pulls spans from CloudWatch log groups. + + .. warning:: + This feature is in preview and may change in future releases. + + Attributes: + service_names: Service names for span filtering. The API accepts exactly one (list of length 1). + log_group_names: CloudWatch log group names to search (1–5). + ingestion_delay_seconds: Seconds to wait for spans to appear in + CloudWatch before submitting the evaluation run. Defaults to 180. + This sleep blocks the calling thread for the full duration; set + to 0 to skip the wait. + """ + + service_names: List[str] = Field(min_length=1, max_length=1) + log_group_names: List[str] = Field(min_length=1, max_length=5) + ingestion_delay_seconds: int = Field(default=180, ge=0) + + def pre_evaluation_run_hook(self) -> None: + """Wait for CloudWatch span ingestion before submitting the evaluation run.""" + if self.ingestion_delay_seconds > 0: + logger.info("Waiting %ds for CloudWatch span ingestion...", self.ingestion_delay_seconds) + time.sleep(self.ingestion_delay_seconds) + + def to_data_source_config( + self, + session_ids: List[str], + start_time: datetime, + end_time: datetime, + ) -> Dict[str, Any]: + """Return a cloudWatchLogs dataSourceConfig dict for the evaluation API.""" + return { + "cloudWatchLogs": { + "serviceNames": self.service_names, + "logGroupNames": self.log_group_names, + "filterConfig": { + "sessionIds": session_ids, + "timeRange": { + "startTime": start_time, + "endTime": end_time, + }, + }, + } + } + + +# --------------------------------------------------------------------------- +# Batch eval result models +# --------------------------------------------------------------------------- + + +class 
FailedScenario(BaseModel): + """Information about a scenario that failed during invocation. + + Attributes: + scenario_id: Scenario identifier. + error_message: Error description. + """ + + scenario_id: str + error_message: str + + +class EvaluatorStatistics(BaseModel): + """Statistics for an evaluator. + + .. warning:: + This feature is in preview and may change in future releases. + + Attributes: + average_score: Average evaluation score across all evaluations. + """ + + model_config = ConfigDict(alias_generator=alias_generators.to_camel, populate_by_name=True) + + average_score: Optional[float] = None + + +class EvaluatorSummary(BaseModel): + """Summary statistics for a single evaluator. + + .. warning:: + This feature is in preview and may change in future releases. + + Attributes: + evaluator_id: Evaluator identifier. + statistics: Aggregated statistics (average score). + total_evaluated: Number of items evaluated. + total_failed: Number of evaluation failures. + """ + + model_config = ConfigDict(alias_generator=alias_generators.to_camel, populate_by_name=True) + + evaluator_id: Optional[str] = None + statistics: Optional[EvaluatorStatistics] = None + total_evaluated: Optional[int] = None + total_failed: Optional[int] = None + + +class BatchEvaluationSummary(BaseModel): + """Aggregated results from a completed batch evaluation. + + .. warning:: + This feature is in preview and may change in future releases. + + Attributes: + number_of_sessions_completed: Number of sessions that were successfully evaluated. + number_of_sessions_in_progress: Number of sessions still being evaluated (non-zero + only in intermediate states). + number_of_sessions_failed: Number of sessions that failed evaluation. + total_number_of_sessions: Total number of sessions submitted for evaluation. + number_of_sessions_ignored: Number of sessions that were ignored. + evaluator_summaries: Per-evaluator statistics including average score. 
+ """ + + model_config = ConfigDict(alias_generator=alias_generators.to_camel, populate_by_name=True) + + number_of_sessions_completed: Optional[int] = None + number_of_sessions_in_progress: Optional[int] = None + number_of_sessions_failed: Optional[int] = None + total_number_of_sessions: Optional[int] = None + number_of_sessions_ignored: Optional[int] = None + evaluator_summaries: Optional[List[EvaluatorSummary]] = None + + +class CloudWatchOutputDataConfig(BaseModel): + """CloudWatch destination for batch evaluation output data. + + .. warning:: + This feature is in preview and may change in future releases. + + Attributes: + log_group_name: CloudWatch log group where evaluation results are written. + log_stream_name: CloudWatch log stream for this batch evaluation's results. + """ + + log_group_name: str + log_stream_name: str + + +class BatchEvaluationResult(BaseModel): + """Result returned by :py:meth:`BatchEvaluationRunner.run`. + + .. warning:: + This feature is in preview and may change in future releases. + + Attributes: + batch_evaluation_id: Unique identifier for the batch evaluation job, + returned by StartBatchEvaluation. + batch_evaluation_arn: ARN of the batch evaluation resource. + batch_evaluation_name: Human-readable name for the batch evaluation job. + description: Optional human-readable description of the batch evaluation job. + status: Terminal status of the job (e.g. ``"COMPLETED"``). + created_at: Timestamp when the batch evaluation job was created. + evaluation_results: Aggregated per-evaluator statistics. Present when + the job completed successfully; ``None`` otherwise. + error_details: Service-reported error messages when the job failed. + agent_invocation_failures: Scenarios that failed during the agent + invocation phase (before the evaluation job was started). A + non-empty list does not prevent the job from running — the service + evaluates only the successfully invoked sessions. 
+ output_data_config: CloudWatch destination where the service writes + per-session evaluation result events. Pass to + :py:meth:`BatchEvaluationRunner.fetch_evaluation_events` + to read the raw OTel evaluation records. + """ + + batch_evaluation_id: str + batch_evaluation_arn: str + batch_evaluation_name: str + description: Optional[str] = None + status: str + created_at: datetime + evaluation_results: Optional[BatchEvaluationSummary] = None + error_details: Optional[List[str]] = None + agent_invocation_failures: List[FailedScenario] = Field(default_factory=list) + output_data_config: Optional[CloudWatchOutputDataConfig] = None + + +# --------------------------------------------------------------------------- +# Batch eval config +# --------------------------------------------------------------------------- + + +class BatchEvaluatorConfig(BaseModel): + """Configuration for evaluators. + + .. warning:: + This feature is in preview and may change in future releases. + + Attributes: + evaluator_ids: List of evaluator IDs (built-in names or custom ARNs). + """ + + evaluator_ids: List[str] = Field(min_length=1) + + +class BatchEvaluationRunConfig(BaseModel): + """Configuration for a single batch evaluation run. + + .. warning:: + This feature is in preview and may change in future releases. + + Attributes: + batch_evaluation_name: Human-readable name for the batch evaluation job. + evaluator_config: Evaluators to run (built-in IDs or custom ARNs). + data_source: Source from which the service reads agent session spans. + Use ``CloudWatchDataSourceConfig`` for agents running on AgentCore Runtime. + max_concurrent_scenarios: Maximum number of scenarios to invoke in + parallel during the agent invocation phase. Defaults to 5. + polling_timeout_seconds: Maximum time to wait for the evaluation job + to reach a terminal state. Defaults to 1800 (30 minutes). + polling_interval_seconds: Time between GetBatchEvaluation polls. + Defaults to 30 seconds. 
Must be less than ``polling_timeout_seconds``. + simulation_config: Actor simulation settings. Required when the dataset + contains SimulatedScenario entries. + """ + + model_config = ConfigDict(arbitrary_types_allowed=True) + + batch_evaluation_name: str + description: Optional[str] = None + evaluator_config: BatchEvaluatorConfig + data_source: DataSourceConfig + max_concurrent_scenarios: int = 5 + polling_timeout_seconds: int = 1800 + polling_interval_seconds: int = 30 + simulation_config: Optional[SimulationConfig] = None + + @model_validator(mode="after") + def validate_polling(self): + """Validate that polling_timeout_seconds > polling_interval_seconds and max_concurrent_scenarios > 0.""" + if self.polling_timeout_seconds <= self.polling_interval_seconds: + raise ValueError( + f"polling_timeout_seconds ({self.polling_timeout_seconds}) must be greater than " + f"polling_interval_seconds ({self.polling_interval_seconds})" + ) + if self.max_concurrent_scenarios <= 0: + raise ValueError("max_concurrent_scenarios must be > 0") + return self diff --git a/src/bedrock_agentcore/evaluation/runner/batch/batch_evaluation_runner.py b/src/bedrock_agentcore/evaluation/runner/batch/batch_evaluation_runner.py new file mode 100644 index 00000000..640676d5 --- /dev/null +++ b/src/bedrock_agentcore/evaluation/runner/batch/batch_evaluation_runner.py @@ -0,0 +1,523 @@ +"""Batch Evaluation Runner for AgentCore Evaluation Service. + +This module provides the BatchEvaluationRunner class that leverages the AgentCore +Evaluation Service's batch evaluation API to run evaluations asynchronously. 
+""" + +import json +import logging +import time +from concurrent.futures import ThreadPoolExecutor, as_completed +from datetime import datetime +from typing import Any, Dict, List, NamedTuple, Optional, Tuple + +import boto3 + +from bedrock_agentcore._utils.endpoints import DEFAULT_REGION, get_data_plane_endpoint +from bedrock_agentcore.evaluation.runner.batch.batch_evaluation_models import ( + BatchEvaluationResult, + BatchEvaluationRunConfig, + BatchEvaluationSummary, + CloudWatchOutputDataConfig, + FailedScenario, +) +from bedrock_agentcore.evaluation.runner.dataset_types import Dataset, PredefinedScenario, Scenario, SimulatedScenario +from bedrock_agentcore.evaluation.runner.invoker_types import AgentInvokerFn +from bedrock_agentcore.evaluation.runner.scenario_executor import ( + PredefinedScenarioExecutor, + ScenarioExecutionResult, + ScenarioExecutor, + SimulatedScenarioExecutor, +) + +logger = logging.getLogger(__name__) + + +class _SessionExecutionMetadata(NamedTuple): + """Internal carrier for per-scenario execution results passed between private methods.""" + + scenario_id: str + session_id: str + start_time: datetime + end_time: datetime + ground_truth: Optional[Dict[str, Any]] + + +# States where the batch evaluation is still making progress +_RUNNING_STATES = frozenset({"PENDING", "IN_PROGRESS", "STOPPING"}) +_SUCCESSFUL_STATES = frozenset({"COMPLETED", "COMPLETED_WITH_ERRORS"}) +_TERMINAL_STATES = _SUCCESSFUL_STATES | frozenset({"FAILED", "STOPPED", "DELETING"}) + + +class BatchEvaluationRunner: + """Runs evaluation using the AgentCore Batch Evaluation API. + + Starts a batch evaluation via StartBatchEvaluation, and polls GetBatchEvaluation for results. + + .. warning:: + This feature is in preview and may change in future releases. 
+ """ + + _SCENARIO_EXECUTORS: Dict[type, type[ScenarioExecutor]] = { + PredefinedScenario: PredefinedScenarioExecutor, + SimulatedScenario: SimulatedScenarioExecutor, + } + + def __init__(self, region: Optional[str] = None): + """Initialize the batch evaluation runner. + + Args: + region: AWS region. Defaults to boto3 session region or DEFAULT_REGION. + """ + session = boto3.Session() + self.region = region or session.region_name or DEFAULT_REGION + self.data_plane_client = session.client( + "bedrock-agentcore", + region_name=self.region, + endpoint_url=get_data_plane_endpoint(self.region), + ) + self._logs_client = session.client("logs", region_name=self.region) + + @staticmethod + def _get_boto3_error_code(e: Exception) -> Optional[str]: + """Extract the error code from a boto3 ClientError, or None.""" + if hasattr(e, "response") and isinstance(e.response, dict): + code = e.response.get("Error", {}).get("Code") + return str(code) if code is not None else None + return None + + def _transform_ground_truth(self, scenario: Scenario) -> Optional[dict]: + """Transform scenario ground truth into InlineGroundTruth format. + + Includes turns, assertions, expectedTrajectory when present. + + Args: + scenario: Scenario with optional ground truth fields. + + Returns: + InlineGroundTruth dict or None if no GT evaluation fields are present. 
+ """ + ground_truth: Dict[str, Any] = {} + + if scenario.assertions: + ground_truth["assertions"] = [{"text": a} for a in scenario.assertions] + + if isinstance(scenario, PredefinedScenario): + if scenario.expected_trajectory is not None: + ground_truth["expectedTrajectory"] = {"toolNames": scenario.expected_trajectory} + + if scenario.turns: + ground_truth["turns"] = [ + { + "input": {"prompt": turn.input if isinstance(turn.input, str) else json.dumps(turn.input)}, + **({"expectedResponse": {"text": turn.expected_response}} if turn.expected_response else {}), + } + for turn in scenario.turns + ] + + if not ground_truth: + logger.debug( + "No ground truth fields found for scenario %s (%s)", + scenario.scenario_id, + type(scenario).__name__, + ) + return None + + return ground_truth + + def _execute_scenario( + self, + config: BatchEvaluationRunConfig, + scenario: Scenario, + agent_invoker: AgentInvokerFn, + ) -> ScenarioExecutionResult: + """Execute a single scenario and return the execution result. + + Args: + config: Batch evaluation run configuration. + scenario: Scenario to execute. + agent_invoker: Agent invocation function. + + Returns: + ScenarioExecutionResult with status "COMPLETED" or "FAILED". + + Raises: + TypeError: If the scenario type is not supported. + """ + executor_cls = self._SCENARIO_EXECUTORS.get(type(scenario)) + if executor_cls is None: + raise TypeError(f"Unsupported scenario type: {type(scenario).__name__}") + + kwargs: Dict[str, Any] = {"agent_invoker": agent_invoker} + if isinstance(scenario, SimulatedScenario): + kwargs["simulation_config"] = config.simulation_config + + executor = executor_cls(**kwargs) + return executor.run_scenario(scenario) + + def _execute_scenarios_parallel( + self, + config: BatchEvaluationRunConfig, + dataset: Dataset, + agent_invoker: AgentInvokerFn, + max_workers: int, + ) -> Tuple[List[_SessionExecutionMetadata], List[FailedScenario]]: + """Execute all scenarios in parallel using ThreadPoolExecutor. 
+ + Args: + config: Batch evaluation run configuration forwarded to each executor. + dataset: Collection of scenarios. + agent_invoker: Agent invocation function. + max_workers: Maximum concurrent executions. + + Returns: + Tuple of (successful_sessions, failed_scenarios). + """ + successful_sessions: List[_SessionExecutionMetadata] = [] + failed_scenarios: List[FailedScenario] = [] + + workers = min(max_workers, len(dataset.scenarios)) if dataset.scenarios else 1 + with ThreadPoolExecutor(max_workers=workers) as executor: + future_to_scenario = { + executor.submit(self._execute_scenario, config, scenario, agent_invoker): scenario + for scenario in dataset.scenarios + } + for future in as_completed(future_to_scenario): + scenario = future_to_scenario[future] + try: + result = future.result() + if result.status == "FAILED": + logger.warning( + "Scenario %s failed during invocation: %s", + scenario.scenario_id, + result.error, + ) + failed_scenarios.append( + FailedScenario( + scenario_id=scenario.scenario_id, + error_message=result.error or "", + ) + ) + else: + successful_sessions.append( + _SessionExecutionMetadata( + scenario_id=result.scenario_id, + session_id=result.session_id, + start_time=result.start_time, + end_time=result.end_time, + ground_truth=self._transform_ground_truth(scenario), + ) + ) + except Exception as e: + logger.exception( + "Scenario %s failed during execution: %s", + scenario.scenario_id, + e, + ) + failed_scenarios.append( + FailedScenario( + scenario_id=scenario.scenario_id, + error_message=str(e), + ) + ) + + logger.info( + "Scenario execution complete: %d successful, %d failed", + len(successful_sessions), + len(failed_scenarios), + ) + if failed_scenarios: + logger.warning( + "Partial failure: %d/%d scenarios failed: %s", + len(failed_scenarios), + len(dataset.scenarios), + [fs.scenario_id for fs in failed_scenarios], + ) + + return successful_sessions, failed_scenarios + + def _poll_for_results( + self, + batch_evaluation_id: str, + 
timeout: int, + poll_interval: int, + ) -> Dict[str, Any]: + """Poll GetBatchEvaluation until a terminal state is reached. + + Args: + batch_evaluation_id: Batch evaluation ID returned by StartBatchEvaluation. + timeout: Maximum polling time in seconds. + poll_interval: Fixed interval between polls in seconds. + + Returns: + dict containing GetBatchEvaluation API response. + + Raises: + TimeoutError: If polling exceeds timeout. + RuntimeError: If the API call fails or the job reaches an unknown status. + """ + start_time = time.monotonic() + logger.info( + "Polling for batch evaluation %s (timeout=%ds, interval=%ds)", + batch_evaluation_id, + timeout, + poll_interval, + ) + + while True: + elapsed = time.monotonic() - start_time + if elapsed > timeout: + logger.error( + "Polling timeout exceeded for batch evaluation %s (elapsed=%.1fs, timeout=%ds)", + batch_evaluation_id, + elapsed, + timeout, + ) + raise TimeoutError(f"Polling timeout exceeded ({timeout}s) for batch evaluation {batch_evaluation_id}") + + try: + response: Dict[str, Any] = self.data_plane_client.get_batch_evaluation( + batchEvaluationId=batch_evaluation_id, + ) + except Exception as e: + error_code = self._get_boto3_error_code(e) + logger.exception( + "GetBatchEvaluation failed (error_code=%s): %s", + error_code, + e, + ) + raise RuntimeError(f"Failed to get batch evaluation result: {e} (error_code={error_code})") from e + + status = response.get("status") + logger.info( + "Batch evaluation %s status: %s (elapsed: %.1fs)", + batch_evaluation_id, + status, + elapsed, + ) + + if status in _TERMINAL_STATES: + if status not in _SUCCESSFUL_STATES: + logger.warning( + "Batch evaluation %s reached non-successful terminal status %s", + batch_evaluation_id, + status, + ) + return response + + if status in _RUNNING_STATES: + time.sleep(poll_interval) + continue + + raise RuntimeError(f"Unknown batch evaluation status: {status}") + + def run_dataset_evaluation( + self, + config: BatchEvaluationRunConfig, + 
dataset: Dataset, + agent_invoker: AgentInvokerFn, + ) -> BatchEvaluationResult: + """Run a batch evaluation on a Dataset. + + Executes all scenarios in parallel via ``agent_invoker``, transforms ground + truth data, submits the collected sessions to ``StartBatchEvaluation``, + and polls until the job reaches a terminal state. + + The returned :class:`BatchEvaluationResult` contains two levels of data: + + * ``result.evaluation_results`` — aggregate per-evaluator statistics + (average scores, session counts). Available immediately. + * Call :py:meth:`fetch_evaluation_events` for individual per-turn scores + with explanations (``gen_ai.evaluation.explanation``). + + Args: + config: Evaluation name, evaluator IDs, session source, + concurrency, and polling timeouts. + dataset: Scenarios to evaluate, with optional ground truth + (``assertions``, ``expected_trajectory``, per-turn + ``expected_response``). + agent_invoker: Called once per turn per scenario. Must be + thread-safe — up to ``config.max_concurrent_scenarios`` threads + invoke it concurrently. + + Returns: + :class:`BatchEvaluationResult` with job IDs, status, + ``evaluation_results`` (:class:`BatchEvaluationSummary`), + ``agent_invocation_failures``, and ``output_data_config``. + + Raises: + ValueError: If ``dataset`` is empty or all scenarios fail during + agent invocation. + RuntimeError: If API calls fail. + TimeoutError: If the job exceeds ``config.polling_timeout_seconds``. 
+ """ + if not dataset.scenarios: + raise ValueError("Dataset must contain at least one scenario") + logger.info( + "Starting batch evaluation: %d scenarios, max_concurrent=%d, timeout=%ds", + len(dataset.scenarios), + config.max_concurrent_scenarios, + config.polling_timeout_seconds, + ) + + successful_sessions, failed_scenarios = self._execute_scenarios_parallel( + config, + dataset, + agent_invoker, + config.max_concurrent_scenarios, + ) + + if not successful_sessions: + raise ValueError( + f"All {len(dataset.scenarios)} scenarios failed during execution. " + f"Failed scenario IDs: {[fs.scenario_id for fs in failed_scenarios]}" + ) + + config.data_source.pre_evaluation_run_hook() + + session_metadata_list = [ + { + "sessionId": session.session_id, + "testScenarioId": session.scenario_id, + **({"groundTruth": {"inline": session.ground_truth}} if session.ground_truth else {}), + } + for session in successful_sessions + ] + + logger.info("Calling StartBatchEvaluation (name=%s)", config.batch_evaluation_name) + try: + start_kwargs: Dict[str, Any] = dict( + batchEvaluationName=config.batch_evaluation_name, + evaluators=[{"evaluatorId": eid} for eid in config.evaluator_config.evaluator_ids], + dataSourceConfig=config.data_source.to_data_source_config( + [s.session_id for s in successful_sessions], + min(s.start_time for s in successful_sessions), + max(s.end_time for s in successful_sessions), + ), + evaluationMetadata={"sessionMetadata": session_metadata_list}, + ) + if config.description is not None: + start_kwargs["description"] = config.description + start_response = self.data_plane_client.start_batch_evaluation(**start_kwargs) + except Exception as e: + error_code = self._get_boto3_error_code(e) + logger.exception( + "StartBatchEvaluation failed (name=%s, error_code=%s): %s", + config.batch_evaluation_name, + error_code, + e, + ) + raise RuntimeError(f"StartBatchEvaluation failed: {e} (error_code={error_code})") from e + + batch_evaluation_id: str = 
start_response["batchEvaluationId"] + batch_evaluation_arn: str = start_response["batchEvaluationArn"] + logger.info("Started batch evaluation: %s", batch_evaluation_id) + + response = self._poll_for_results( + batch_evaluation_id, + config.polling_timeout_seconds, + config.polling_interval_seconds, + ) + + evaluation_results = None + if "evaluationResults" in response: + evaluation_results = BatchEvaluationSummary.model_validate(response["evaluationResults"]) + + output_data_config = None + if "outputConfig" in response: + odc = response["outputConfig"].get("cloudWatchConfig") + if odc: + output_data_config = CloudWatchOutputDataConfig( + log_group_name=odc["logGroupName"], + log_stream_name=odc["logStreamName"], + ) + + result = BatchEvaluationResult( + batch_evaluation_id=batch_evaluation_id, + batch_evaluation_arn=batch_evaluation_arn, + batch_evaluation_name=response["batchEvaluationName"], + status=response["status"], + created_at=response["createdAt"], + description=response.get("description"), + agent_invocation_failures=failed_scenarios, + evaluation_results=evaluation_results, + error_details=response.get("errorDetails"), + output_data_config=output_data_config, + ) + + logger.info( + "Batch evaluation complete: batch_evaluation_id=%s, status=%s, sessions_completed=%s, sessions_failed=%s", + result.batch_evaluation_id, + result.status, + result.evaluation_results.number_of_sessions_completed if result.evaluation_results else None, + result.evaluation_results.number_of_sessions_failed if result.evaluation_results else None, + ) + return result + + def fetch_evaluation_events(self, result: BatchEvaluationResult) -> List[Dict[str, Any]]: + """Fetch per-turn evaluation events from CloudWatch. + + Complements ``result.evaluation_results`` (:class:`BatchEvaluationSummary`), + which contains aggregate average scores. 
This method returns one OTel event + per turn per evaluator, each with an individual score and a natural-language + explanation (``gen_ai.evaluation.explanation``). + + Args: + result: Completed :class:`BatchEvaluationResult` from + :py:meth:`run_dataset_evaluation`. + + Returns: + List of event dicts, one per turn per evaluator, containing + ``gen_ai.evaluation.name``, ``gen_ai.evaluation.score.value``, + ``gen_ai.evaluation.score.label``, ``gen_ai.evaluation.explanation``, + and trace context (``traceId``, ``gen_ai.response.id``). + + Raises: + ValueError: If ``result.output_data_config`` is ``None`` (job did + not produce a CloudWatch destination). + LookupError: If the log stream does not exist yet; retry after a + short delay. + """ + if result.output_data_config is None: + raise ValueError( + f"No output_data_config on batch evaluation {result.batch_evaluation_id}. " + "The service did not return a CloudWatch destination for this evaluation." + ) + output_data_config = result.output_data_config + results: List[Dict[str, Any]] = [] + kwargs: Dict[str, Any] = { + "logGroupName": output_data_config.log_group_name, + "logStreamName": output_data_config.log_stream_name, + "startFromHead": True, + } + try: + while True: + response = self._logs_client.get_log_events(**kwargs) + for event in response.get("events", []): + message = event.get("message", "") + try: + results.append(json.loads(message)) + except json.JSONDecodeError: + logger.warning( + "Skipping non-JSON log event in stream %s: %r", + output_data_config.log_stream_name, + message[:200], + ) + next_token = response.get("nextForwardToken") + if next_token == kwargs.get("nextToken"): + break + kwargs.pop("startFromHead", None) + kwargs["nextToken"] = next_token + except self._logs_client.exceptions.ResourceNotFoundException as e: + raise LookupError( + f"CloudWatch log stream not found: group={output_data_config.log_group_name!r}, " + f"stream={output_data_config.log_stream_name!r}. 
" + "Evaluation results may not have been written yet." + ) from e + logger.info( + "Fetched %d evaluation result events from %s/%s", + len(results), + output_data_config.log_group_name, + output_data_config.log_stream_name, + ) + return results diff --git a/src/bedrock_agentcore/evaluation/runner/dataset_providers.py b/src/bedrock_agentcore/evaluation/runner/dataset_providers.py index dc1da43e..f8cb2cb4 100644 --- a/src/bedrock_agentcore/evaluation/runner/dataset_providers.py +++ b/src/bedrock_agentcore/evaluation/runner/dataset_providers.py @@ -2,9 +2,9 @@ import json from abc import ABC, abstractmethod -from typing import Any, Dict +from typing import Any, Dict, List -from .dataset_types import Dataset, PredefinedScenario, Turn +from .dataset_types import ActorProfile, Dataset, PredefinedScenario, Scenario, SimulatedScenario, Turn class DatasetProvider(ABC): @@ -26,18 +26,35 @@ def get_dataset(self) -> Dataset: """Load and return the dataset from the JSON file.""" with open(self._file_path) as f: data = json.load(f) - scenarios = [self._parse_scenario(s) for s in data["scenarios"]] + scenarios: List[Scenario] = [self._parse_scenario(s) for s in data["scenarios"]] return Dataset(scenarios=scenarios) @staticmethod - def _parse_scenario(raw: Dict[str, Any]) -> PredefinedScenario: - return PredefinedScenario( - scenario_id=raw["scenario_id"], - turns=[FileDatasetProvider._parse_turn(t) for t in raw["turns"]], - expected_trajectory=raw.get("expected_trajectory"), - assertions=raw.get("assertions"), - metadata=raw.get("metadata"), - ) + def _parse_scenario(raw: Dict[str, Any]) -> PredefinedScenario | SimulatedScenario: + if "turns" in raw: + return PredefinedScenario( + scenario_id=raw["scenario_id"], + turns=[FileDatasetProvider._parse_turn(t) for t in raw["turns"]], + expected_trajectory=raw.get("expected_trajectory"), + assertions=raw.get("assertions"), + metadata=raw.get("metadata"), + ) + else: + missing = [k for k in ("scenario_id", "actor_profile", "input") if 
k not in raw] + if missing: + raise ValueError( + f"Scenario '{raw.get('scenario_id', '?')}' is missing required fields " + f"for SimulatedScenario: {missing}" + ) + return SimulatedScenario( + scenario_id=raw["scenario_id"], + scenario_description=raw.get("scenario_description", ""), + actor_profile=ActorProfile(**raw["actor_profile"]), + input=raw["input"], + max_turns=raw.get("max_turns", 10), + assertions=raw.get("assertions"), + metadata=raw.get("metadata"), + ) @staticmethod def _parse_turn(raw: Dict[str, Any]) -> Turn: diff --git a/src/bedrock_agentcore/evaluation/runner/dataset_types.py b/src/bedrock_agentcore/evaluation/runner/dataset_types.py index 50fe63f0..6d8b6206 100644 --- a/src/bedrock_agentcore/evaluation/runner/dataset_types.py +++ b/src/bedrock_agentcore/evaluation/runner/dataset_types.py @@ -3,13 +3,30 @@ Defines how evaluation datasets, scenarios, and turns are structured. """ -from typing import Any, Dict, List, Optional, Union +from typing import Any, Dict, List, Optional, Type, Union -from pydantic import BaseModel, model_validator +from pydantic import BaseModel, ConfigDict, model_validator Input = Union[str, Dict[str, Any]] +class ActorProfile(BaseModel): + """Profile describing the simulated actor's identity and objective. + + .. warning:: + This feature is in preview and may change in future releases. + + Attributes: + traits: Characteristics of the actor (e.g. expertise level, communication style). + context: Background information about the actor. + goal: What the actor wants to achieve in the interaction. + """ + + traits: Dict[str, Any] = {} + context: str + goal: str + + class Turn(BaseModel): """A single conversational turn in an evaluation scenario.""" @@ -39,6 +56,37 @@ def validate_turns_non_empty(self): return self +class SimulatedScenario(Scenario): + """A scenario driven by a simulated actor in a multi-turn conversation loop. + + .. warning:: + This feature is in preview and may change in future releases. 
+ + Attributes: + scenario_description: Human-readable description of what this scenario tests. + actor_profile: Profile defining the simulated actor's traits, context, and goal. + input: The initial payload sent to the agent to start the conversation. + Accepts a plain string, a structured dict, or a ``pydantic.BaseModel`` + instance (e.g. an instance of ``SimulationConfig.input_type``). + max_turns: Maximum number of conversation turns before the simulation stops. + Defaults to 10. + """ + + model_config = ConfigDict(arbitrary_types_allowed=True) + + scenario_description: str = "" + actor_profile: ActorProfile + input: Union[str, Dict[str, Any], BaseModel] + max_turns: int = 10 + + @model_validator(mode="after") + def validate_max_turns(self): + """Validate that max_turns is at least 1.""" + if self.max_turns < 1: + raise ValueError("max_turns must be >= 1") + return self + + class Dataset(BaseModel): """A collection of evaluation scenarios.""" @@ -49,8 +97,37 @@ def validate_scenarios(self): """Validate that scenarios list is not empty and has unique IDs.""" if not self.scenarios: raise ValueError("scenarios must not be empty") - ids = [s.scenario_id for s in self.scenarios] - duplicates = set(sid for sid in ids if ids.count(sid) > 1) + seen: set = set() + duplicates: set = set() + for s in self.scenarios: + (duplicates if s.scenario_id in seen else seen).add(s.scenario_id) if duplicates: raise ValueError(f"Duplicate scenario_ids: {duplicates}") return self + + +class SimulationConfig(BaseModel): + """Configuration for actor simulation in SimulatedScenario execution. + + .. warning:: + This feature is in preview and may change in future releases. + + Attributes: + model_id: Bedrock model ID for the actor agent. Uses the Strands + default model when None. + system_prompt_template: Jinja2 system prompt template for the actor. + Must contain an ``{{ actor_profile }}`` placeholder. When None, the built-in + ``structured_user_simulator.j2`` template is used. 
+ input_type: Pydantic model class describing the agent's expected input. + When set, ``input`` values in SimulatedScenario are validated into + this type for the first agent call. For subsequent turns the actor is + schema-constrained via tool-use to produce instances of this type directly. + output_type: Pydantic model class describing the agent's output schema. + """ + + model_config = ConfigDict(arbitrary_types_allowed=True) + + model_id: Optional[str] = None + system_prompt_template: Optional[str] = None + input_type: Optional[Type[BaseModel]] = None + output_type: Optional[Type[BaseModel]] = None diff --git a/src/bedrock_agentcore/evaluation/runner/on_demand/__init__.py b/src/bedrock_agentcore/evaluation/runner/on_demand/__init__.py index 7eb9d633..389e425b 100644 --- a/src/bedrock_agentcore/evaluation/runner/on_demand/__init__.py +++ b/src/bedrock_agentcore/evaluation/runner/on_demand/__init__.py @@ -1,11 +1,13 @@ """Evaluation runner: orchestrates agent evaluation end-to-end.""" from bedrock_agentcore.evaluation.agent_span_collector import AgentSpanCollector, CloudWatchAgentSpanCollector +from bedrock_agentcore.evaluation.runner.dataset_types import SimulationConfig from ..scenario_executor import ( PredefinedScenarioExecutor, ScenarioExecutionResult, ScenarioExecutor, + SimulatedScenarioExecutor, ) from .config import EvaluationRunConfig, EvaluatorConfig from .on_demand_runner import OnDemandEvaluationDatasetRunner @@ -23,4 +25,6 @@ "ScenarioResult", "ScenarioExecutor", "PredefinedScenarioExecutor", + "SimulatedScenarioExecutor", + "SimulationConfig", ] diff --git a/src/bedrock_agentcore/evaluation/runner/on_demand/config.py b/src/bedrock_agentcore/evaluation/runner/on_demand/config.py index 527432c5..de975b44 100644 --- a/src/bedrock_agentcore/evaluation/runner/on_demand/config.py +++ b/src/bedrock_agentcore/evaluation/runner/on_demand/config.py @@ -1,9 +1,11 @@ """Configuration for the evaluation runner.""" -from typing import List +from typing import 
List, Optional from pydantic import BaseModel +from bedrock_agentcore.evaluation.runner.dataset_types import SimulationConfig + class EvaluatorConfig(BaseModel): """Configuration for evaluators. @@ -16,12 +18,17 @@ class EvaluatorConfig(BaseModel): class EvaluationRunConfig(BaseModel): - """Top-level configuration for an evaluation run. + """Top-level configuration for an on-demand evaluation run. Attributes: evaluator_config: Evaluator settings. + evaluation_delay_seconds: Seconds to wait for CloudWatch span ingestion. + max_concurrent_scenarios: Thread pool size for concurrent scenario execution. + simulation_config: Actor simulation settings. Required when the dataset + contains SimulatedScenario entries. """ evaluator_config: EvaluatorConfig evaluation_delay_seconds: int = 180 max_concurrent_scenarios: int = 5 + simulation_config: Optional[SimulationConfig] = None diff --git a/src/bedrock_agentcore/evaluation/runner/on_demand/on_demand_runner.py b/src/bedrock_agentcore/evaluation/runner/on_demand/on_demand_runner.py index 3167f1d2..5b36b047 100644 --- a/src/bedrock_agentcore/evaluation/runner/on_demand/on_demand_runner.py +++ b/src/bedrock_agentcore/evaluation/runner/on_demand/on_demand_runner.py @@ -11,12 +11,13 @@ from bedrock_agentcore.evaluation.agent_span_collector import AgentSpanCollector -from ..dataset_types import Dataset, PredefinedScenario, Scenario +from ..dataset_types import Dataset, PredefinedScenario, Scenario, SimulatedScenario from ..invoker_types import AgentInvokerFn from ..scenario_executor import ( PredefinedScenarioExecutor, ScenarioExecutionResult, ScenarioExecutor, + SimulatedScenarioExecutor, ) from .config import EvaluationRunConfig from .result import EvaluationResult, EvaluatorResult, ScenarioResult @@ -45,6 +46,7 @@ def __init__(self, region: Optional[str] = None): self._cache_lock = threading.Lock() self._scenario_executors: Dict[type, type[ScenarioExecutor]] = { PredefinedScenario: PredefinedScenarioExecutor, + 
SimulatedScenario: SimulatedScenarioExecutor, } def run( @@ -85,7 +87,7 @@ def run( logger.info("Phase 1: Invoking %d scenario(s)", num_scenarios) invoke_futures = {} for idx, scenario in enumerate(dataset.scenarios): - future = pool.submit(self._run_scenario, scenario, agent_invoker) + future = pool.submit(self._run_scenario, config, scenario, agent_invoker) invoke_futures[idx] = (scenario, future) # Collect invocation results @@ -177,6 +179,7 @@ def _collect_and_evaluate( def _run_scenario( self, + config: EvaluationRunConfig, scenario: Scenario, agent_invoker: AgentInvokerFn, ) -> ScenarioExecutionResult: @@ -184,7 +187,12 @@ def _run_scenario( executor_cls = self._scenario_executors.get(type(scenario)) if executor_cls is None: raise TypeError(f"No runner registered for scenario type: {type(scenario).__name__}") - executor = executor_cls(agent_invoker=agent_invoker) + + kwargs: Dict[str, Any] = {"agent_invoker": agent_invoker} + if isinstance(scenario, SimulatedScenario): + kwargs["simulation_config"] = config.simulation_config + + executor = executor_cls(**kwargs) return executor.run_scenario(scenario) # --- Step 2: Collect Spans --- diff --git a/src/bedrock_agentcore/evaluation/runner/prompts/__init__.py b/src/bedrock_agentcore/evaluation/runner/prompts/__init__.py new file mode 100644 index 00000000..8b36bdc8 --- /dev/null +++ b/src/bedrock_agentcore/evaluation/runner/prompts/__init__.py @@ -0,0 +1,56 @@ +"""Prompt template rendering utilities for the evaluation runner. + +Templates are stored as Jinja2 ``.j2`` files in this directory. +Use :func:`render_template_file` for built-in templates and +:func:`render_template_string` for caller-supplied template strings. 
+""" + +from pathlib import Path +from typing import Any + +_PROMPTS_DIR = Path(__file__).parent +_environment = None + + +def _get_environment(): + global _environment + if _environment is None: + try: + from jinja2 import Environment, FileSystemLoader + except ImportError as e: + raise ImportError( + "jinja2 is required for SimulatedScenario execution. " + "Install it with: pip install 'bedrock-agentcore[simulation]'" + ) from e + _environment = Environment( # nosec B701 - templates render plain text/JSON, not HTML + loader=FileSystemLoader(_PROMPTS_DIR), + autoescape=False, + keep_trailing_newline=True, + ) + return _environment + + +def render_template_file(name: str, **kwargs: Any) -> str: + """Render a built-in ``.j2`` template file by name. + + Args: + name: Filename relative to the prompts directory (e.g. ``"structured_user_simulator.j2"``). + **kwargs: Variables substituted into the template. + + Returns: + The rendered template string. + """ + return _get_environment().get_template(name).render(**kwargs) + + +def render_template_string(template_str: str, **kwargs: Any) -> str: + """Render a caller-supplied Jinja2 template string. + + Args: + template_str: A Jinja2 template string (use ``{{ variable }}`` syntax). + **kwargs: Variables substituted into the template. + + Returns: + The rendered template string. 
+ """ + return _get_environment().from_string(template_str).render(**kwargs) diff --git a/src/bedrock_agentcore/evaluation/runner/prompts/structured_user_simulator.j2 b/src/bedrock_agentcore/evaluation/runner/prompts/structured_user_simulator.j2 new file mode 100644 index 00000000..bf91f4c4 --- /dev/null +++ b/src/bedrock_agentcore/evaluation/runner/prompts/structured_user_simulator.j2 @@ -0,0 +1,63 @@ +## User Simulation + +Core Identity: +- You are simulating a user seeking assistance from an AI assistant +- You speak in first person only +- You strictly follow your defined User Goal and User Profile throughout the conversation + +## User Profile +{{ actor_profile }} + +{% if scenario_description %} +## Scenario +{{ scenario_description }} +{% endif %} +{% if output_schema %} +## Agent Response Format +The agent will respond with a valid JSON object matching this schema: +{{ output_schema }} +{% endif %} + +Response Protocols: + When assistant requests information: + - Provide brief, specific information + - Maximum 2-3 sentences + + When assistant provides solutions/answers: + - Ask follow-ups, seek clarification, or express satisfaction. Do not deviate from the User Goal. + - While following up, do not increase the conversation scope beyond your User Goal. + +Communication Rules: +1. STRICT maximum response length: 2-3 sentences +2. You are seeking help, NOT providing help - never give solutions! +3. Maintain your user profile and expertise level consistently +4. Express more of your user profile - let your background, expertise level, and personality + shine through in your responses +5. Don't break character by mentioning "assistant" or "AI" explicitly +6. Address AI assistant responses in second person ("Your suggestion..." not "The assistant's suggestion...") +7. Do not explicitly mention conversation redirection +8. Never include meta-references or self-instructions in your responses. 
These reveal you
+ are a simulator and are not how a real human would communicate. Don't write phrases like:
+ - I need to respond as the user would ...
+ - As the simulated user, I should ...
+ - Here's how the user might respond ...
+ - Based on my user goal, I need to ...
+9. Use the Exit Conditions strictly to stick to User Goal.
+10. Use all relevant tools first to ground your responses, and then respond.
+
+Exit Conditions:
+1. Use get_conversation_goal_completion tool to check if your User Goal is met. When your
+ User Goal is met, set stop=true in your structured response to end the conversation.
+2. If conversation becomes unproductive or unsafe:
+ - Naturally steer back towards your User Goal
+ - If this becomes impossible, set stop=true in your structured response to end the conversation
+
+CRITICAL BEHAVIORAL CONSTRAINTS:
+- You are ONLY a user seeking assistance, NEVER the one providing assistance.
+- NEVER generate comprehensive responses, detailed plans, or extensive information.
+- NEVER solve problems yourself - that's the assistant's job. Under no circumstances
+ can you use your tools to solve your user goal/sub goals.
+- If you find yourself writing more than 3 sentences, you're doing it wrong.
+
+Response Format:
+Generate ONLY the next SHORT message (1-3 sentences). No explanations, no solutions, no comprehensive information.
diff --git a/src/bedrock_agentcore/evaluation/runner/scenario_executor.py b/src/bedrock_agentcore/evaluation/runner/scenario_executor.py
index 50b18d43..b042fcff 100644
--- a/src/bedrock_agentcore/evaluation/runner/scenario_executor.py
+++ b/src/bedrock_agentcore/evaluation/runner/scenario_executor.py
@@ -1,22 +1,57 @@
 """Scenario executor abstractions for the evaluation framework.
 
 Each ScenarioExecutor subclass owns the invocation logic for a specific scenario type,
-keeping the OnDemandEvaluationDatasetRunner agnostic to how turns are produced.
+keeping the runners agnostic to how turns are produced.
""" +import json import logging +import random import uuid from abc import ABC, abstractmethod +from dataclasses import dataclass from datetime import datetime, timezone -from typing import Optional +from typing import Any, List, Optional, Type -from pydantic import BaseModel, ConfigDict +from pydantic import BaseModel, ConfigDict, Field, ValidationError, create_model -from .dataset_types import Scenario +from .dataset_types import Scenario, SimulatedScenario, SimulationConfig from .invoker_types import AgentInvokerFn, AgentInvokerInput +from .prompts import render_template_file, render_template_string logger = logging.getLogger(__name__) +_INITIAL_GREETINGS: List[str] = [ + "hi! how can I help you today?", + "hello! what can I assist you with?", + "hi there! how may I help you?", + "good day! what can I do for you?", + "hello! what would you like to know?", +] + + +@dataclass +class SimulatorResult: + """Output from a single actor turn in a simulated conversation. + + .. warning:: + This feature is in preview and may change in future releases. + + Attributes: + message: The actor's next message. An ``input_type`` instance when + ``input_type`` is configured; a plain ``str`` or ``None`` otherwise. + ``None`` when ``stop=True`` regardless of whether ``input_type`` is set. + reasoning: The actor's internal reasoning for this response. + stop: ``True`` when the actor signals the conversation should end. + stop_reason: Why the conversation ended: ``"goal_completed"``, + ``"max_turns"``, or ``None`` when the conversation is still ongoing. + """ + + message: Any + reasoning: str + stop: bool + stop_reason: Optional[str] + class ScenarioExecutionResult(BaseModel): """Return value from a scenario execution.""" @@ -90,3 +125,350 @@ def run_scenario(self, scenario: Scenario) -> ScenarioExecutionResult: status=status, error=error, ) + + +class SimulatedScenarioExecutor(ScenarioExecutor): + """Runs a SimulatedScenario using AgentCoreActorSimulator. + + .. 
warning:: + This feature is in preview and may change in future releases. + + Uses a dynamically-typed structured output model so the LLM is schema-constrained + via tool-use enforcement to produce correctly-typed messages, eliminating the need + for JSON parsing heuristics. + """ + + simulation_config: Optional[SimulationConfig] = None + + def run_scenario(self, scenario: Scenario) -> ScenarioExecutionResult: + """Execute a simulated scenario using an actor-driven conversation loop.""" + if not isinstance(scenario, SimulatedScenario): + raise TypeError(f"Expected SimulatedScenario, got {type(scenario).__name__}") + sim_config = self.simulation_config + + start_time = datetime.now(timezone.utc) + session_id = f"{scenario.scenario_id}-{uuid.uuid4()}" + turn_count = 0 + + try: + try: + from strands_evals.simulation.tools.goal_completion import get_conversation_goal_completion + from strands_evals.types.simulation import ActorProfile as StrandsActorProfile + except ImportError as e: + raise ImportError( + "strands-agents-evals is required for SimulatedScenario execution. 
" + "Install it with: pip install 'bedrock-agentcore[simulation]'" + ) from e + + strands_profile = StrandsActorProfile( + traits=scenario.actor_profile.traits, + context=scenario.actor_profile.context, + actor_goal=scenario.actor_profile.goal, + ) + + system_prompt = _render_system_prompt(sim_config, strands_profile, scenario.scenario_description) + + input_type = sim_config.input_type if sim_config else None + output_type = sim_config.output_type if sim_config else None + simulator = AgentCoreActorSimulator( + actor_profile=strands_profile, + initial_query=_to_string(scenario.input), + system_prompt=system_prompt, + input_type=input_type, + model=sim_config.model_id if sim_config else None, + max_turns=scenario.max_turns, + tools=[get_conversation_goal_completion], + ) + + next_payload = _build_payload(scenario.input, sim_config) + + while True: + turn_count += 1 + logger.debug( + "Turn %d for scenario %s (session_id=%s)", + turn_count, + scenario.scenario_id, + session_id, + ) + output = self.agent_invoker(AgentInvokerInput(payload=next_payload, session_id=session_id)) + sim_result = simulator.act(_extract_agent_output(output.agent_output, output_type)) + logger.debug( + "Turn %d actor result: stop=%s, stop_reason=%s", + turn_count, + sim_result.stop, + sim_result.stop_reason, + ) + + if sim_result.stop: + logger.info( + "Scenario %s: actor ended conversation (reason=%s)", + scenario.scenario_id, + sim_result.stop_reason, + ) + break + + if turn_count >= scenario.max_turns: + logger.warning( + "Scenario %s: executor hit max_turns backstop (%d); simulator did not signal stop", + scenario.scenario_id, + turn_count, + ) + break + + next_payload = sim_result.message + + status = "COMPLETED" + error = None + + except Exception as e: + logger.exception("Scenario %s failed: %s", scenario.scenario_id, e) + status = "FAILED" + error = str(e) + + end_time = datetime.now(timezone.utc) + elapsed = (end_time - start_time).total_seconds() + if status == "COMPLETED": + 
logger.info( + "Scenario %s completed (%d turn(s) in %.1fs), time_range=[%s, %s]", + scenario.scenario_id, + turn_count, + elapsed, + start_time, + end_time, + ) + return ScenarioExecutionResult( + scenario_id=scenario.scenario_id, + session_id=session_id, + start_time=start_time, + end_time=end_time, + status=status, + error=error, + ) + + +class AgentCoreActorSimulator: + """Actor simulator with dynamically-typed structured output. + + .. warning:: + This feature is in preview and may change in future releases. + + Uses a strands ``Agent`` with a per-scenario Pydantic response model whose + ``message`` field is typed as ``Optional[input_type]`` when ``input_type`` + is provided. The LLM tool-use schema then enforces the correct message + structure rather than relying on prompt instructions. + + Response model when ``input_type`` is set:: + + SimulatorActorResponse(reasoning: str, stop: bool, message: Optional[input_type]) + + Response model when ``input_type`` is ``None``:: + + SimulatorActorResponse(reasoning: str, stop: bool, message: Optional[str]) + + **Conversation history bootstrap**: the actor's strands ``Agent`` is seeded + with a two-message history before the first real ``act()`` call: + + - ``user`` (agent's synthetic opener): a random greeting from + ``_INITIAL_GREETINGS``, standing in for the agent saying hello. + - ``assistant`` (actor's first turn): the ``initial_query`` string derived + from ``input``. + + This gives the actor the context that it has already sent its opening + question, so the first real ``act()`` call — which delivers the agent's + actual response to that question — arrives with a coherent conversation + history. The greeting is never sent to the real agent; it exists only to + orient the actor. 
+ """ + + def __init__( + self, + actor_profile: Any, + initial_query: str, + system_prompt: str, + input_type: Optional[Type[BaseModel]] = None, + model: Optional[str] = None, + max_turns: int = 10, + tools: Optional[list] = None, + ): + """Initialize the simulator, building the response model and seeding conversation history.""" + from strands import Agent + + self._input_type = input_type + self._max_turns = max_turns + self._turn_count = 0 + self._response_model = _make_response_model(input_type) + + conversation_history = [ + {"role": "user", "content": [{"text": random.choice(_INITIAL_GREETINGS)}]}, + {"role": "assistant", "content": [{"text": initial_query.strip()}]}, + ] + + self._agent = Agent( + system_prompt=system_prompt, + messages=conversation_history, + tools=tools or [], + model=model, + callback_handler=None, + ) + + def act(self, agent_message: str) -> SimulatorResult: + """Send the agent's response to the actor and return a SimulatorResult.""" + response = self._agent(agent_message.strip(), structured_output_model=self._response_model) + self._turn_count += 1 + + actor_response = response.structured_output + stop = bool(actor_response.stop) or self._turn_count >= self._max_turns + stop_reason: Optional[str] = None + if stop: + if actor_response.stop: + stop_reason = "goal_completed" + else: + stop_reason = "max_turns" + + message = actor_response.message + + # Guard: actor signalled continue but produced no message — treat as implicit stop. 
+ if not stop and message is None: + input_type_suffix = f" (input_type={self._input_type.__name__})" if self._input_type else "" + logger.warning( + "Actor produced null message when stop=False; treating as goal_completed%s", input_type_suffix + ) + stop = True + stop_reason = "goal_completed" + + return SimulatorResult( + message=message, + reasoning=actor_response.reasoning, + stop=stop, + stop_reason=stop_reason, + ) + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _make_response_model(input_type: Optional[Type[BaseModel]]) -> Type[BaseModel]: + """Build a dynamic Pydantic model for actor structured output. + + When ``input_type`` is provided, ``message`` is typed as + ``Optional[input_type]`` so the LLM tool-use schema enforces the correct + structure on non-stop turns. + """ + if input_type is None: + msg_annotation = Optional[str] + msg_field = Field( + None, + description="The actor's next message to send to the agent. Provide when stop=false. Null when stop=true.", + ) + else: + msg_annotation = Optional[input_type] + msg_field = Field( + None, + description=( + f"Structured message matching the agent's input schema ({input_type.__name__}). " + "Provide when stop=false. Set to null when stop=true." + ), + ) + + return create_model( + "SimulatorActorResponse", + __base__=BaseModel, + reasoning=(str, Field(..., description="Internal reasoning for this response.")), + stop=( + bool, + Field( + False, + description="Set to true when the conversation goal is met or the conversation should end.", + ), + ), + message=(msg_annotation, msg_field), + ) + + +def _render_system_prompt( + sim_config: Optional[SimulationConfig], + strands_profile: Any, + scenario_description: str = "", +) -> str: + """Render the actor system prompt from a Jinja2 template. + + Resolution order: + 1. 
User-supplied ``system_prompt_template`` (rendered as a Jinja2 string). + 2. Built-in ``structured_user_simulator.j2`` (always used when no custom template). + + The ``actor_profile`` dict and optional ``output_schema`` (when ``output_type`` is + set) are injected as template variables. Structured input typing is enforced via + the response model's tool-use schema rather than the system prompt. + """ + actor_profile_data = strands_profile.model_dump() + + if sim_config and sim_config.system_prompt_template: + kwargs: dict = {"actor_profile": actor_profile_data, "scenario_description": scenario_description} + if sim_config.output_type: + kwargs["output_schema"] = json.dumps(sim_config.output_type.model_json_schema(), indent=2) + return render_template_string(sim_config.system_prompt_template, **kwargs) + + output_schema = ( + json.dumps(sim_config.output_type.model_json_schema(), indent=2) + if sim_config and sim_config.output_type + else None + ) + return render_template_file( + "structured_user_simulator.j2", + actor_profile=actor_profile_data, + scenario_description=scenario_description, + output_schema=output_schema, + ) + + +def _to_string(value: Any) -> str: + """Serialize a value to a plain string.""" + if isinstance(value, str): + return value + if isinstance(value, BaseModel): + return value.model_dump_json() + if isinstance(value, dict): + return json.dumps(value) + return str(value) + + +def _extract_agent_output(agent_output: Any, output_type: Optional[Type[BaseModel]]) -> str: + """Serialize the agent's output into a string suitable for the actor. + + When ``output_type`` is provided the agent output is validated against that + schema and re-serialized as canonical JSON. If parsing fails the output is + serialized with ``_to_string`` and returned as-is. + + When ``output_type`` is ``None`` the output is serialized directly with + ``_to_string``. 
+ """ + if output_type is None: + return _to_string(agent_output) + + raw = _to_string(agent_output) + try: + parsed = output_type.model_validate_json(raw) + except ValidationError: + logger.warning("Agent output could not be parsed as %s; passing through as-is", output_type.__name__) + return raw + + return parsed.model_dump_json() + + +def _build_payload(input: Any, sim_config: Optional[SimulationConfig]) -> Any: + """Return the first agent payload, parsing input into input_type when configured.""" + if sim_config and sim_config.input_type: + if isinstance(input, sim_config.input_type): + return input + if isinstance(input, BaseModel): + # Wrong BaseModel subtype — coerce via its dict representation so + # Pydantic validates compatibility and raises ValidationError on mismatch + # rather than silently passing the wrong type to the agent. + return sim_config.input_type.model_validate(input.model_dump()) + if isinstance(input, dict): + return sim_config.input_type.model_validate(input) + if isinstance(input, str): + return sim_config.input_type.model_validate_json(input) + return input diff --git a/src/bedrock_agentcore/runtime/a2a.py b/src/bedrock_agentcore/runtime/a2a.py index efbe9002..5e64feb7 100644 --- a/src/bedrock_agentcore/runtime/a2a.py +++ b/src/bedrock_agentcore/runtime/a2a.py @@ -9,17 +9,21 @@ import uuid from typing import Any, Callable, Optional +from ..config_bundle.baggage import _extract_baggage from .context import BedrockAgentCoreContext from .models import ( ACCESS_TOKEN_HEADER, AGENTCORE_RUNTIME_URL_ENV, AUTHORIZATION_HEADER, + BAGGAGE_KEY_EXPERIMENT_ARN, + BAGGAGE_KEY_EXPERIMENT_VARIANT, CUSTOM_HEADER_PREFIX, OAUTH2_CALLBACK_URL_HEADER, REQUEST_ID_HEADER, SESSION_HEADER, PingStatus, ) +from .tracing import _ensure_baggage_processor_registered logger = logging.getLogger(__name__) @@ -97,6 +101,11 @@ class BedrockCallContextBuilder: automatically calls ``build()`` on every incoming request. 
""" + def __init__(self) -> None: + """Initialize BedrockCallContextBuilder and register the baggage span processor.""" + # Register early so the ASGI entry span (POST /invocations) gets stamped. + _ensure_baggage_processor_registered() + def build(self, request: Any) -> Any: """Build a ServerCallContext from a Starlette Request. @@ -132,6 +141,21 @@ def build(self, request: Any) -> Any: if request_headers: BedrockAgentCoreContext.set_request_headers(request_headers) + all_baggage: dict = {} + try: + all_baggage = _extract_baggage(headers) + except Exception as e: + logger.warning( + "Failed to parse baggage: %s: %s — raw baggage: %r", + type(e).__name__, + e, + headers.get("baggage", ""), + ) + experiment_arn = next(iter(all_baggage.get(BAGGAGE_KEY_EXPERIMENT_ARN, [])), None) + experiment_variant = next(iter(all_baggage.get(BAGGAGE_KEY_EXPERIMENT_VARIANT, [])), None) + BedrockAgentCoreContext.set_routing_experiment(experiment_arn, experiment_variant) + _ensure_baggage_processor_registered() + state = { "request_id": request_id, "session_id": session_id, diff --git a/src/bedrock_agentcore/runtime/ag_ui.py b/src/bedrock_agentcore/runtime/ag_ui.py index b0d77323..a7e77be0 100644 --- a/src/bedrock_agentcore/runtime/ag_ui.py +++ b/src/bedrock_agentcore/runtime/ag_ui.py @@ -21,16 +21,20 @@ from starlette.routing import Route, WebSocketRoute from starlette.websockets import WebSocket, WebSocketDisconnect +from ..config_bundle.baggage import _extract_baggage from .context import BedrockAgentCoreContext, RequestContext from .models import ( ACCESS_TOKEN_HEADER, AUTHORIZATION_HEADER, + BAGGAGE_KEY_EXPERIMENT_ARN, + BAGGAGE_KEY_EXPERIMENT_VARIANT, CUSTOM_HEADER_PREFIX, OAUTH2_CALLBACK_URL_HEADER, REQUEST_ID_HEADER, SESSION_HEADER, PingStatus, ) +from .tracing import _ensure_baggage_processor_registered logger = logging.getLogger(__name__) @@ -83,6 +87,9 @@ def __init__( ] super().__init__(routes=routes, debug=debug, lifespan=lifespan, middleware=middleware) + # Register 
early so the ASGI entry span (POST /invocations) gets stamped. + _ensure_baggage_processor_registered() + def entrypoint(self, agent_or_func: Any) -> Any: """Register the agent handler for both SSE and WebSocket transports. @@ -172,6 +179,21 @@ def _build_request_context(self, request: Request | WebSocket) -> RequestContext if request_headers: BedrockAgentCoreContext.set_request_headers(request_headers) + all_baggage: dict = {} + try: + all_baggage = _extract_baggage(headers) + except Exception as e: + logger.warning( + "Failed to parse baggage: %s: %s — raw baggage: %r", + type(e).__name__, + e, + headers.get("baggage", ""), + ) + experiment_arn = next(iter(all_baggage.get(BAGGAGE_KEY_EXPERIMENT_ARN, [])), None) + experiment_variant = next(iter(all_baggage.get(BAGGAGE_KEY_EXPERIMENT_VARIANT, [])), None) + BedrockAgentCoreContext.set_routing_experiment(experiment_arn, experiment_variant) + _ensure_baggage_processor_registered() + return RequestContext( session_id=session_id, request_headers=BedrockAgentCoreContext.get_request_headers(), diff --git a/src/bedrock_agentcore/runtime/app.py b/src/bedrock_agentcore/runtime/app.py index 290a16ca..af11da94 100644 --- a/src/bedrock_agentcore/runtime/app.py +++ b/src/bedrock_agentcore/runtime/app.py @@ -9,6 +9,7 @@ import inspect import json import logging +import os import queue import threading import time @@ -25,10 +26,15 @@ from starlette.types import Lifespan from starlette.websockets import WebSocket, WebSocketDisconnect +from ..config_bundle.baggage import _extract_baggage, _parse_config_bundle_baggage +from ..config_bundle.bundle import ConfigBundleRef +from ..config_bundle.client import ConfigBundleClient from .context import BedrockAgentCoreContext, RequestContext from .models import ( ACCESS_TOKEN_HEADER, AUTHORIZATION_HEADER, + BAGGAGE_KEY_EXPERIMENT_ARN, + BAGGAGE_KEY_EXPERIMENT_VARIANT, CUSTOM_HEADER_PREFIX, OAUTH2_CALLBACK_URL_HEADER, REQUEST_ID_HEADER, @@ -40,8 +46,53 @@ TASK_ACTION_PING_STATUS, PingStatus, ) 
+from .tracing import _ensure_baggage_processor_registered from .utils import convert_complex_objects +# Sentinel so we only parse OTEL_RESOURCE_ATTRIBUTES once per process. +_UNRESOLVED = object() +_runtime_arn_cache: object = _UNRESOLVED +_runtime_arn_lock: threading.Lock = threading.Lock() + + +def _parse_runtime_arn() -> Optional[str]: + """Return the runtime ARN for this process, derived from OTEL_RESOURCE_ATTRIBUTES. + + Reads the ``cloud.resource_id`` attribute, which OTEL sets to either a + runtime ARN or a runtime-endpoint ARN. + Runtime-endpoint ARNs are normalised to a plain runtime ARN by stripping + the ``/runtime-endpoint/...`` suffix. + + The result is cached after the first call — the env var does not change + during the process lifetime. + + Returns ``None`` when the env var is absent or ``cloud.resource_id`` is + not present. + """ + global _runtime_arn_cache + if _runtime_arn_cache is not _UNRESOLVED: + return _runtime_arn_cache # type: ignore[return-value] + + with _runtime_arn_lock: + if _runtime_arn_cache is not _UNRESOLVED: + return _runtime_arn_cache # type: ignore[return-value] + + result: Optional[str] = None + otel_attrs = os.environ.get("OTEL_RESOURCE_ATTRIBUTES", "") + for attr in otel_attrs.split(","): + attr = attr.strip() + if not attr.startswith("cloud.resource_id="): + continue + arn = attr[len("cloud.resource_id=") :] + # Normalise runtime-endpoint ARN → runtime ARN. + if "/runtime-endpoint/" in arn: + arn = arn.split("/runtime-endpoint/")[0] + result = arn + break + + _runtime_arn_cache = result + return result + def _is_async_callable(obj: Any) -> bool: """Check if obj is async-callable, unwrapping functools.partial.""" @@ -128,6 +179,16 @@ def __init__( self._worker_thread: Optional[threading.Thread] = None self._worker_loop_lock: threading.Lock = threading.Lock() + # Config bundle client — created lazily on first use. + # _resolve_bundle_config is wrapped with lru_cache(maxsize=30), keyed by + # ConfigBundleRef. 
The API is called at most once per unique ref across all + # requests on this app instance (one process = one microVM = one app instance). + self._config_client: Optional[ConfigBundleClient] = None + self._config_client_lock: threading.Lock = threading.Lock() + self._resolve_bundle_config = functools.lru_cache(maxsize=30)( # type: ignore[method-assign] + self._resolve_bundle_config + ) + routes = [ Route("/invocations", self._handle_invocation, methods=["POST"]), Route("/ping", self._handle_ping, methods=["GET"]), @@ -144,6 +205,11 @@ def __init__( self.logger.addHandler(handler) self.logger.setLevel(logging.DEBUG if self.debug else logging.INFO) + # Register early so the ASGI entry span (POST /invocations) gets stamped. + # In the managed runtime ADOT sets up the TracerProvider before __init__ runs, + # so this call lands on the real provider rather than the no-op default. + _ensure_baggage_processor_registered() + def entrypoint(self, func: Callable) -> Callable: """Decorator to register a function as the main entrypoint. @@ -363,6 +429,38 @@ def _build_request_context(self, request) -> RequestContext: if request_headers: BedrockAgentCoreContext.set_request_headers(request_headers) + # Parse baggage once; reuse for both config bundle and routing experiment. 
+ all_baggage: dict = {} + bundle_ref = None + try: + all_baggage = _extract_baggage(headers) + bundle_ref = _parse_config_bundle_baggage(all_baggage) + except Exception as e: + self.logger.warning( + "Failed to parse baggage: %s: %s — raw baggage: %r", + type(e).__name__, + e, + headers.get("baggage", ""), + ) + + if bundle_ref is not None: + self.logger.info("Received config bundle ref: %s", bundle_ref.bundle_id) + BedrockAgentCoreContext.set_config_bundle_ref(bundle_ref) + BedrockAgentCoreContext._set_bundle_loader( + fetcher=lambda: self._resolve_bundle_config(bundle_ref), + ) + else: + self.logger.debug("No config bundle ref found in request baggage") + BedrockAgentCoreContext.set_config_bundle_ref(None) + BedrockAgentCoreContext._clear_bundle_loader() + + experiment_arn = next(iter(all_baggage.get(BAGGAGE_KEY_EXPERIMENT_ARN, [])), None) + experiment_variant = next(iter(all_baggage.get(BAGGAGE_KEY_EXPERIMENT_VARIANT, [])), None) + BedrockAgentCoreContext.set_routing_experiment(experiment_arn, experiment_variant) + # Re-registers if the TracerProvider was replaced after __init__ ran + # (e.g. a framework calling set_tracer_provider during first-request setup). + _ensure_baggage_processor_registered() + # Get the headers from context to pass to RequestContext req_headers = BedrockAgentCoreContext.get_request_headers() @@ -377,6 +475,54 @@ def _build_request_context(self, request) -> RequestContext: BedrockAgentCoreContext.set_request_context(request_id, None) return RequestContext(session_id=None, request=None) + def _get_config_client(self) -> ConfigBundleClient: + """Return the config client, creating it lazily once per process.""" + if self._config_client is None: + with self._config_client_lock: + if self._config_client is None: + self._config_client = ConfigBundleClient() + return self._config_client + + def _resolve_bundle_config(self, ref: ConfigBundleRef) -> Dict[str, Any]: + """Fetch bundle from API and return this runtime's config section. 
+ + Manages client lifecycle, API call, runtime ARN filtering. + Called by _DeferredBundleConfig.get() on a cache miss — at most once per + unique (bundle_id, bundle_version) across all requests. + """ + self.logger.debug("Fetching config bundle %r version %r", ref.bundle_id, ref.bundle_version) + try: + response = self._get_config_client().get_configuration_bundle_version( + bundleId=ref.bundle_id, versionId=ref.bundle_version + ) + except Exception as e: + self.logger.error( + "Failed to fetch config bundle %r version %r: %s: %s", + ref.bundle_id, + ref.bundle_version, + type(e).__name__, + e, + ) + raise + + components = response.get("components", {}) + runtime_arn = _parse_runtime_arn() + if runtime_arn is None: + self.logger.warning("OTEL_RESOURCE_ATTRIBUTES not set — cannot select config component") + return {} + + component = components.get(runtime_arn) + if component is None: + self.logger.warning( + "Runtime ARN %r not found in bundle %r — available: %s", + runtime_arn, + ref.bundle_id, + list(components.keys()), + ) + return {} + + return component.get("configuration", {}) + def _takes_context(self, handler: Callable) -> bool: try: params = list(inspect.signature(handler).parameters.keys()) diff --git a/src/bedrock_agentcore/runtime/context.py b/src/bedrock_agentcore/runtime/context.py index b0c3090d..b5f20221 100644 --- a/src/bedrock_agentcore/runtime/context.py +++ b/src/bedrock_agentcore/runtime/context.py @@ -3,11 +3,16 @@ Contains metadata extracted from HTTP requests that handlers can optionally access. 
""" +import logging from contextvars import ContextVar -from typing import Any, Dict, Optional +from typing import Any, Callable, Dict, Optional from pydantic import BaseModel, Field +from ..config_bundle.bundle import ConfigBundleRef + +logger = logging.getLogger(__name__) + class RequestContext(BaseModel): """Request context containing metadata from HTTP requests.""" @@ -30,6 +35,15 @@ class BedrockAgentCoreContext: _request_id: ContextVar[Optional[str]] = ContextVar("request_id") _session_id: ContextVar[Optional[str]] = ContextVar("session_id") _request_headers: ContextVar[Optional[Dict[str, str]]] = ContextVar("request_headers") + _routing_experiment_arn: ContextVar[Optional[str]] = ContextVar("routing_experiment_arn", default=None) + _routing_experiment_variant: ContextVar[Optional[str]] = ContextVar("routing_experiment_variant", default=None) + + # Config bundle — ref identifies the bundle for this request. + # _bundle_fetcher is the lru_cache-wrapped app._resolve_bundle_config(ref), + # set per-request by the app. Calling it fetches from the API on first use + # for a given bundle version, then returns the cached result on subsequent calls. + _config_bundle_ref: ContextVar[Optional[ConfigBundleRef]] = ContextVar("config_bundle_ref", default=None) + _bundle_fetcher: ContextVar[Optional[Callable[[], Dict[str, Any]]]] = ContextVar("bundle_fetcher", default=None) @classmethod def set_workload_access_token(cls, token: str): @@ -91,3 +105,85 @@ def get_request_headers(cls) -> Optional[Dict[str, str]]: return cls._request_headers.get() except LookupError: return None + + @classmethod + def set_routing_experiment(cls, arn: Optional[str], variant: Optional[str]) -> None: + """Store routing experiment identifiers for the current request. + + .. warning:: + This feature is in preview and may change in future releases. 
+ """ + cls._routing_experiment_arn.set(arn) + cls._routing_experiment_variant.set(variant) + + @classmethod + def get_routing_experiment_arn(cls) -> Optional[str]: + """Return the routing experiment ARN for the current request, or None. + + .. warning:: + This feature is in preview and may change in future releases. + """ + return cls._routing_experiment_arn.get() + + @classmethod + def get_routing_experiment_variant(cls) -> Optional[str]: + """Return the routing experiment variant name for the current request, or None. + + .. warning:: + This feature is in preview and may change in future releases. + """ + return cls._routing_experiment_variant.get() + + @classmethod + def set_config_bundle_ref(cls, ref: Optional[ConfigBundleRef]) -> None: + """Set the configuration bundle reference for the current request. + + .. warning:: + This feature is in preview and may change in future releases. + """ + cls._config_bundle_ref.set(ref) + + @classmethod + def get_config_bundle_ref(cls) -> Optional[ConfigBundleRef]: + """Get the configuration bundle reference for the current request. + + .. warning:: + This feature is in preview and may change in future releases. + """ + return cls._config_bundle_ref.get() + + @classmethod + def _set_bundle_loader(cls, fetcher: Callable[[], Dict[str, Any]]) -> None: + """Register the config fetcher for this request. Called by the app. + + The fetcher is lru_cache-wrapped app._resolve_bundle_config(ref), so the + underlying API call is made at most once per unique bundle version across + all requests on this app instance. + """ + cls._bundle_fetcher.set(fetcher) + + @classmethod + def _clear_bundle_loader(cls) -> None: + """Clear the config fetcher. Called by the app when no bundle ref is present.""" + cls._bundle_fetcher.set(None) + + @classmethod + def get_config_bundle(cls) -> Dict[str, Any]: + """Return this runtime's config from the current request's bundle. + + .. warning:: + This feature is in preview and may change in future releases. 
+ + Fetches from the API on the first call for a given bundle version, then + serves from the per-app-instance LRU cache on all subsequent calls. + Returns {} if no bundle ref is present in the request baggage. + + Raises: + Exception: Propagated from the underlying API call if the config + bundle service is unavailable. Callers that require graceful + degradation should catch and fall back to their own defaults. + """ + fetcher = cls._bundle_fetcher.get() + if fetcher is None: + return {} + return fetcher() diff --git a/src/bedrock_agentcore/runtime/models.py b/src/bedrock_agentcore/runtime/models.py index 6fc33f9c..b9dab21d 100644 --- a/src/bedrock_agentcore/runtime/models.py +++ b/src/bedrock_agentcore/runtime/models.py @@ -22,6 +22,10 @@ class PingStatus(str, Enum): CUSTOM_HEADER_PREFIX = "X-Amzn-Bedrock-AgentCore-Runtime-Custom-" AGENTCORE_RUNTIME_URL_ENV = "AGENTCORE_RUNTIME_URL" +# Baggage keys for routing experiment span attributes +BAGGAGE_KEY_EXPERIMENT_ARN = "aws.agentcore.gateway.routing_experiment_arn" +BAGGAGE_KEY_EXPERIMENT_VARIANT = "aws.agentcore.gateway.routing_experiment_variant_name" + # Task action constants TASK_ACTION_PING_STATUS = "ping_status" TASK_ACTION_JOB_STATUS = "job_status" diff --git a/src/bedrock_agentcore/runtime/tracing.py b/src/bedrock_agentcore/runtime/tracing.py new file mode 100644 index 00000000..7c0cf9cb --- /dev/null +++ b/src/bedrock_agentcore/runtime/tracing.py @@ -0,0 +1,145 @@ +"""OpenTelemetry span processor for Bedrock AgentCore routing experiment attributes. + +Provides ``BaggageSpanProcessor``, which stamps every span with the routing +experiment ARN and variant name extracted from the request's W3C baggage header. +Values are read from two sources in priority order: + +1. ``BedrockAgentCoreContext`` ContextVars — set by ``_build_request_context`` + after the baggage header is parsed. Covers agent/tool spans created during + handler execution. +2. 
OTel baggage in the span's ``parent_context`` — covers spans started by ASGI + instrumentation *before* ``_build_request_context`` runs (e.g. the root + ``POST /invocations`` server span), where the propagator has already + extracted the W3C baggage into the OTel context. + +Auto-registration +----------------- +``_ensure_baggage_processor_registered()`` is called by the SDK on every +request. It registers ``BaggageSpanProcessor`` on the active +``TracerProvider`` the first time it is called, and re-registers whenever +``get_tracer_provider()`` returns a different provider instance than the one +last seen — which handles the case where a framework replaces the global +provider after the app is constructed. +""" + +import logging +import threading +from typing import Optional + +from .context import BedrockAgentCoreContext as _context + +logger = logging.getLogger(__name__) + +# Module-level state for provider-tracking auto-registration. +_registration_lock = threading.Lock() +_registered_on: Optional[object] = None # the TracerProvider instance we last registered on + + +def _ensure_baggage_processor_registered() -> None: + """Register ``BaggageSpanProcessor`` on the current ``TracerProvider`` if needed. + + No-ops when ``opentelemetry-api`` is not installed. + Re-registers automatically when the global provider has been replaced since + the last call (e.g. by a framework that calls set_tracer_provider at startup). + """ + global _registered_on + try: + from opentelemetry import trace + + provider = trace.get_tracer_provider() + if provider is _registered_on: + return + with _registration_lock: + # Re-check inside the lock — another thread may have registered first. 
+ provider = trace.get_tracer_provider() + if provider is _registered_on: + return + provider.add_span_processor(BaggageSpanProcessor()) + _registered_on = provider + logger.debug("BaggageSpanProcessor registered on %s", type(provider).__name__) + except ImportError: + logger.debug("opentelemetry-api not installed; BaggageSpanProcessor registration skipped") + except Exception: + logger.debug("Could not register BaggageSpanProcessor", exc_info=True) + + +def _get_base_class() -> type: + """Return the OTel SDK SpanProcessor base if available, otherwise object. + + ``opentelemetry-sdk`` is not a required dependency of this package, so the + import may fail. Deferring it here (rather than at module level) means the + module loads cleanly regardless — a module-level ImportError would crash + ``BedrockAgentCoreApp.__init__`` even for users who don't use OTel at all. + + When the SDK *is* present, inheriting from ``SpanProcessor`` is required: + ``SynchronousMultiSpanProcessor`` calls internal hooks like ``_on_ending`` + that only exist on the SDK base class. + + When the SDK is absent, ``ProxyTracerProvider`` has no ``add_span_processor``, + so ``_ensure_baggage_processor_registered`` no-ops before the processor is + ever used — the ``object`` fallback is effectively dead code at runtime. + """ + try: + from opentelemetry.sdk.trace import SpanProcessor # type: ignore[import] + + return SpanProcessor + except ImportError: + return object + + +class BaggageSpanProcessor(_get_base_class()): # type: ignore[misc] + """SpanProcessor that stamps every span with routing experiment attributes. + + .. warning:: + This feature is in preview and may change in future releases. + + Reads ``BedrockAgentCoreContext.get_routing_experiment_arn()`` and + ``get_routing_experiment_variant()`` from ContextVars on ``on_start``, + so each concurrent request gets its own values with no cross-talk. 
+ + Span attributes set (when the corresponding baggage key is present): + - ``aws.agentcore.gateway.routing_experiment_arn`` + - ``aws.agentcore.gateway.routing_experiment_variant_name`` + """ + + def on_start(self, span: object, parent_context: Optional[object] = None) -> None: + """Set routing experiment attributes on every new span. + + Primary source: ContextVars set by ``_build_request_context`` — covers + all spans created after request parsing (agent spans, tool spans, etc.). + + Fallback: OTel baggage in ``parent_context`` — covers spans created by + ASGI instrumentation before ``_build_request_context`` runs (e.g. + ``POST /invocations``), where the propagator has already extracted the + W3C baggage header into the context. + """ + arn = _context.get_routing_experiment_arn() + variant = _context.get_routing_experiment_variant() + + if (arn is None or variant is None) and parent_context is not None: + try: + from opentelemetry import baggage as otel_baggage + + if arn is None: + arn = otel_baggage.get_baggage("aws.agentcore.gateway.routing_experiment_arn", parent_context) + if variant is None: + variant = otel_baggage.get_baggage( + "aws.agentcore.gateway.routing_experiment_variant_name", parent_context + ) + except ImportError: + logger.debug("opentelemetry-api not installed; parent_context baggage fallback skipped") + + if arn is not None: + span.set_attribute("aws.agentcore.gateway.routing_experiment_arn", arn) # type: ignore[union-attr] + if variant is not None: + span.set_attribute("aws.agentcore.gateway.routing_experiment_variant_name", variant) # type: ignore[union-attr] + + def on_end(self, span: object) -> None: + """No-op.""" + + def shutdown(self) -> None: + """No-op.""" + + def force_flush(self, timeout_millis: int = 30000) -> bool: + """No-op — returns True to indicate success.""" + return True diff --git a/tests/bedrock_agentcore/config_bundle/__init__.py b/tests/bedrock_agentcore/config_bundle/__init__.py new file mode 100644 index 
00000000..e69de29b diff --git a/tests/bedrock_agentcore/config_bundle/test_baggage.py b/tests/bedrock_agentcore/config_bundle/test_baggage.py new file mode 100644 index 00000000..e4cf0062 --- /dev/null +++ b/tests/bedrock_agentcore/config_bundle/test_baggage.py @@ -0,0 +1,107 @@ +"""Tests for baggage parsing utilities.""" + +from bedrock_agentcore.config_bundle.baggage import _extract_baggage, _parse_config_bundle_baggage +from bedrock_agentcore.config_bundle.bundle import ConfigBundleRef + +ARN = "arn:aws:bedrock-agentcore:us-west-2:123456789012:bundle/my-agent" +ARN2 = "arn:aws:bedrock-agentcore:us-west-2:123456789012:bundle/other-agent" +ARN_KEY = "aws.agentcore.configbundle_arn" +VERSION_KEY = "aws.agentcore.configbundle_version" + + +class TestExtractBaggage: + def test_single_baggage_header(self): + headers = [("baggage", f"{ARN_KEY}={ARN},{VERSION_KEY}=2")] + result = _extract_baggage(headers) + assert result == {ARN_KEY: [ARN], VERSION_KEY: ["2"]} + + def test_multiple_baggage_headers_same_name(self): + headers = [ + ("baggage", f"{ARN_KEY}={ARN},{VERSION_KEY}=2"), + ("baggage", f"{ARN_KEY}={ARN2},{VERSION_KEY}=5"), + ] + result = _extract_baggage(headers) + assert result[ARN_KEY] == [ARN, ARN2] + assert result[VERSION_KEY] == ["2", "5"] + + def test_duplicate_key_in_single_header(self): + headers = [("baggage", f"{ARN_KEY}={ARN},{ARN_KEY}={ARN2}")] + result = _extract_baggage(headers) + assert result[ARN_KEY] == [ARN, ARN2] + + def test_ignores_non_baggage_headers(self): + headers = [ + ("content-type", "application/json"), + ("baggage", f"{ARN_KEY}={ARN},{VERSION_KEY}=1"), + ("authorization", "Bearer token"), + ] + result = _extract_baggage(headers) + assert set(result.keys()) == {ARN_KEY, VERSION_KEY} + + def test_case_insensitive_header_name(self): + headers = [("Baggage", f"{ARN_KEY}={ARN},{VERSION_KEY}=3")] + result = _extract_baggage(headers) + assert result[ARN_KEY] == [ARN] + + def test_strips_properties_after_semicolon(self): + headers = 
[("baggage", f"{ARN_KEY}={ARN};meta=x,{VERSION_KEY}=1;ttl=60")] + result = _extract_baggage(headers) + assert result[ARN_KEY] == [ARN] + assert result[VERSION_KEY] == ["1"] + + def test_empty_baggage_header(self): + result = _extract_baggage([]) + assert result == {} + + def test_malformed_entries_skipped(self): + headers = [("baggage", f"no-equals,{ARN_KEY}={ARN},{VERSION_KEY}=2")] + result = _extract_baggage(headers) + assert result[ARN_KEY] == [ARN] + assert result[VERSION_KEY] == ["2"] + + def test_empty_key_skipped(self): + # "=value" has no key — must not insert "" into result + headers = [("baggage", f"=orphan,{ARN_KEY}={ARN}")] + result = _extract_baggage(headers) + assert "" not in result + assert result[ARN_KEY] == [ARN] + + def test_empty_value_skipped(self): + # "key=" has an empty value — must not append "" to the list + headers = [("baggage", f"{ARN_KEY}=,{VERSION_KEY}=2")] + result = _extract_baggage(headers) + assert ARN_KEY not in result + assert result[VERSION_KEY] == ["2"] + + def test_extra_whitespace_stripped(self): + headers = [("baggage", f" {ARN_KEY} = {ARN} , {VERSION_KEY} = 7 ")] + result = _extract_baggage(headers) + assert result[ARN_KEY] == [ARN] + assert result[VERSION_KEY] == ["7"] + + +class TestParseConfigBundleBaggage: + def test_single_bundle(self): + ref = _parse_config_bundle_baggage({ARN_KEY: [ARN], VERSION_KEY: ["2"]}) + assert ref == ConfigBundleRef(bundle_arn=ARN, bundle_version="2") + + def test_multiple_arns_uses_first(self): + ref = _parse_config_bundle_baggage({ARN_KEY: [ARN, ARN2], VERSION_KEY: ["2", "5"]}) + assert ref == ConfigBundleRef(bundle_arn=ARN, bundle_version="2") + + def test_empty_baggage(self): + assert _parse_config_bundle_baggage({}) is None + + def test_missing_version(self): + assert _parse_config_bundle_baggage({ARN_KEY: [ARN]}) is None + + def test_missing_arn(self): + assert _parse_config_bundle_baggage({VERSION_KEY: ["2"]}) is None + + def test_unrelated_keys_ignored(self): + ref = 
_parse_config_bundle_baggage({ARN_KEY: [ARN], VERSION_KEY: ["2"], "other_key": ["other_value"]}) + assert ref == ConfigBundleRef(bundle_arn=ARN, bundle_version="2") + + def test_returns_single_ref(self): + ref = _parse_config_bundle_baggage({ARN_KEY: [ARN], VERSION_KEY: ["1"]}) + assert isinstance(ref, ConfigBundleRef) diff --git a/tests/bedrock_agentcore/config_bundle/test_bundle.py b/tests/bedrock_agentcore/config_bundle/test_bundle.py new file mode 100644 index 00000000..2b4e7bc6 --- /dev/null +++ b/tests/bedrock_agentcore/config_bundle/test_bundle.py @@ -0,0 +1,62 @@ +"""Tests for ConfigBundleRef.""" + +import pytest + +from bedrock_agentcore.config_bundle.bundle import ConfigBundleRef + + +class TestConfigBundleRef: + def test_bundle_id_extracted_from_arn(self): + ref = ConfigBundleRef( + bundle_arn="arn:aws:bedrock-agentcore:us-west-2:123456789012:configuration-bundle/my-agent-ab12cd34ef", + bundle_version="2", + ) + assert ref.bundle_id == "my-agent-ab12cd34ef" + + def test_bundle_id_extracted_from_short_arn(self): + ref = ConfigBundleRef( + bundle_arn="arn:aws:bedrock-agentcore:us-west-2:123456789012:bundle/my-agent", + bundle_version="1", + ) + assert ref.bundle_id == "my-agent" + + def test_bundle_version_preserved(self): + ref = ConfigBundleRef( + bundle_arn="arn:aws:bedrock-agentcore:us-west-2:123456789012:bundle/my-agent", + bundle_version="42", + ) + assert ref.bundle_version == "42" + + def test_frozen(self): + ref = ConfigBundleRef( + bundle_arn="arn:aws:bedrock-agentcore:us-west-2:123456789012:bundle/my-agent", + bundle_version="1", + ) + with pytest.raises((AttributeError, TypeError)): + ref.bundle_arn = "other" + + def test_empty_arn_raises(self): + with pytest.raises(ValueError, match="bundle_arn must not be empty"): + ConfigBundleRef(bundle_arn="", bundle_version="1") + + def test_empty_version_raises(self): + with pytest.raises(ValueError, match="bundle_version must not be empty"): + ConfigBundleRef(bundle_arn="arn:aws:...:bundle/my-agent", 
bundle_version="") + + def test_arn_without_slash_raises(self): + with pytest.raises(ValueError, match="does not contain a valid bundle ID segment"): + ConfigBundleRef(bundle_arn="not-an-arn", bundle_version="1") + + def test_arn_with_trailing_slash_raises(self): + with pytest.raises(ValueError, match="does not contain a valid bundle ID segment"): + ConfigBundleRef(bundle_arn="arn:aws:...:bundle/", bundle_version="1") + + def test_equality(self): + ref1 = ConfigBundleRef(bundle_arn="arn:aws:...:bundle/my-agent", bundle_version="1") + ref2 = ConfigBundleRef(bundle_arn="arn:aws:...:bundle/my-agent", bundle_version="1") + assert ref1 == ref2 + + def test_inequality_different_version(self): + ref1 = ConfigBundleRef(bundle_arn="arn:aws:...:bundle/my-agent", bundle_version="1") + ref2 = ConfigBundleRef(bundle_arn="arn:aws:...:bundle/my-agent", bundle_version="2") + assert ref1 != ref2 diff --git a/tests/bedrock_agentcore/config_bundle/test_client.py b/tests/bedrock_agentcore/config_bundle/test_client.py new file mode 100644 index 00000000..812db0f6 --- /dev/null +++ b/tests/bedrock_agentcore/config_bundle/test_client.py @@ -0,0 +1,58 @@ +"""Tests for ConfigBundleClient.""" + +from unittest.mock import MagicMock + +import pytest + +from bedrock_agentcore.config_bundle.client import ConfigBundleClient + + +class TestConfigBundleClient: + def test_boto_client_created_lazily_on_first_access(self): + mock_session = MagicMock() + mock_boto_client = MagicMock() + mock_session.client.return_value = mock_boto_client + + client = ConfigBundleClient(region_name="us-east-1", boto3_session=mock_session) + + # No boto3 client created yet + mock_session.client.assert_not_called() + + # Trigger lazy init via __getattr__ with an allowed operation + _ = client.list_configuration_bundles + + mock_session.client.assert_called_once_with( + "bedrock-agentcore-control", + region_name="us-east-1", + endpoint_url="https://bedrock-agentcore-control.us-east-1.amazonaws.com", + ) + + def 
test_boto_client_reused_across_calls(self): + mock_session = MagicMock() + mock_boto_client = MagicMock() + mock_session.client.return_value = mock_boto_client + + client = ConfigBundleClient(region_name="us-east-1", boto3_session=mock_session) + _ = client.list_configuration_bundles + _ = client.list_configuration_bundles + + mock_session.client.assert_called_once() + + def test_getattr_forwards_to_boto_client(self): + mock_session = MagicMock() + mock_boto_client = MagicMock() + mock_session.client.return_value = mock_boto_client + + client = ConfigBundleClient(region_name="us-east-1", boto3_session=mock_session) + client.list_configuration_bundles(maxResults=10) + + mock_boto_client.list_configuration_bundles.assert_called_once_with(maxResults=10) + + def test_disallowed_operation_raises_attribute_error(self): + mock_session = MagicMock() + mock_session.client.return_value = MagicMock() + + client = ConfigBundleClient(region_name="us-east-1", boto3_session=mock_session) + + with pytest.raises(AttributeError, match="does not expose operation 'create_evaluator'"): + _ = client.create_evaluator diff --git a/tests/bedrock_agentcore/evaluation/runner/batch/__init__.py b/tests/bedrock_agentcore/evaluation/runner/batch/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/bedrock_agentcore/evaluation/runner/batch/test_batch_evaluation_models.py b/tests/bedrock_agentcore/evaluation/runner/batch/test_batch_evaluation_models.py new file mode 100644 index 00000000..24fa7860 --- /dev/null +++ b/tests/bedrock_agentcore/evaluation/runner/batch/test_batch_evaluation_models.py @@ -0,0 +1,26 @@ +"""Unit tests for session source config to_data_source_config methods.""" + +from datetime import datetime, timezone + +from bedrock_agentcore.evaluation.runner.batch.batch_evaluation_models import ( + CloudWatchDataSourceConfig, +) + +_T0 = datetime(2024, 1, 1, tzinfo=timezone.utc) +_T1 = datetime(2024, 1, 2, tzinfo=timezone.utc) + +_CW_SOURCE = 
CloudWatchDataSourceConfig( + service_names=["my-service"], + log_group_names=["/aws/my-log-group"], + ingestion_delay_seconds=0, +) + + +def test_cloudwatch_to_data_source_config_returns_session_ids(): + result = _CW_SOURCE.to_data_source_config(["s1", "s2"], _T0, _T1) + cw = result["cloudWatchLogs"] + assert cw["serviceNames"] == ["my-service"] + assert cw["logGroupNames"] == ["/aws/my-log-group"] + assert cw["filterConfig"]["sessionIds"] == ["s1", "s2"] + assert cw["filterConfig"]["timeRange"]["startTime"] == _T0 + assert cw["filterConfig"]["timeRange"]["endTime"] == _T1 diff --git a/tests/bedrock_agentcore/evaluation/runner/batch/test_batch_evaluation_runner.py b/tests/bedrock_agentcore/evaluation/runner/batch/test_batch_evaluation_runner.py new file mode 100644 index 00000000..db2f7138 --- /dev/null +++ b/tests/bedrock_agentcore/evaluation/runner/batch/test_batch_evaluation_runner.py @@ -0,0 +1,718 @@ +"""Unit tests for BatchEvaluationRunner.""" + +from datetime import datetime, timezone +from unittest.mock import MagicMock, patch + +import pytest +from pydantic import ValidationError + +from bedrock_agentcore.evaluation.runner.batch.batch_evaluation_models import ( + BatchEvaluationRunConfig, + BatchEvaluatorConfig, + CloudWatchDataSourceConfig, + CloudWatchOutputDataConfig, +) +from bedrock_agentcore.evaluation.runner.batch.batch_evaluation_runner import ( + BatchEvaluationRunner, +) +from bedrock_agentcore.evaluation.runner.dataset_types import ( + ActorProfile, + Dataset, + PredefinedScenario, + SimulatedScenario, + Turn, +) +from bedrock_agentcore.evaluation.runner.invoker_types import AgentInvokerInput, AgentInvokerOutput + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + +_T0 = datetime(2024, 1, 1, 0, 0, 0, tzinfo=timezone.utc) +_T1 = datetime(2024, 1, 1, 0, 1, 0, tzinfo=timezone.utc) + +_SCENARIO = PredefinedScenario( + 
scenario_id="s1", + turns=[Turn(input="hello", expected_response="world")], + assertions=["Be helpful"], +) + +_DATASET = Dataset(scenarios=[_SCENARIO]) + + +def _make_invoker(): + def invoker(inp: AgentInvokerInput) -> AgentInvokerOutput: + return AgentInvokerOutput(agent_output="ok") + + return invoker + + +def _make_cw_source(): + return CloudWatchDataSourceConfig( + service_names=["my-service"], + log_group_names=["/aws/my-log-group"], + ingestion_delay_seconds=0, + ) + + +def _make_config(source=None): + return BatchEvaluationRunConfig( + batch_evaluation_name="test-eval", + evaluator_config=BatchEvaluatorConfig(evaluator_ids=["Builtin.Helpfulness"]), + data_source=source or _make_cw_source(), + max_concurrent_scenarios=2, + polling_timeout_seconds=60, + polling_interval_seconds=5, + ) + + +def _make_start_response(batch_evaluation_id="eval-001"): + return { + "batchEvaluationId": batch_evaluation_id, + "batchEvaluationArn": f"arn:aws:bedrock-agentcore:us-west-2:123:batch-evaluation/{batch_evaluation_id}", + "batchEvaluationName": "test-eval", + "status": "PENDING", + "createdAt": _T0, + } + + +def _make_completed_response(batch_evaluation_id="eval-001", with_output_data=False, with_eval_results=False): + response = { + "batchEvaluationId": batch_evaluation_id, + "batchEvaluationArn": f"arn:aws:bedrock-agentcore:us-west-2:123:batch-evaluation/{batch_evaluation_id}", + "batchEvaluationName": "test-eval", + "status": "COMPLETED", + "createdAt": _T0, + } + if with_output_data: + response["outputConfig"] = { + "cloudWatchConfig": { + "logGroupName": "/aws/agentcore/evaluation/results", + "logStreamName": "batch-eval-001", + } + } + if with_eval_results: + response["evaluationResults"] = { + "numberOfSessionsCompleted": 1, + "numberOfSessionsInProgress": 0, + "numberOfSessionsFailed": 0, + "totalNumberOfSessions": 1, + "evaluatorSummaries": [ + { + "evaluatorId": "Builtin.Helpfulness", + "statistics": { + "averageScore": 0.9, + }, + "totalEvaluated": 1, + 
"totalFailed": 0, + } + ], + } + return response + + +# --------------------------------------------------------------------------- +# Helpers: patch runner internals +# --------------------------------------------------------------------------- + + +def _make_runner(): + with patch.object(BatchEvaluationRunner, "__init__", lambda self, **kw: None): + runner = BatchEvaluationRunner.__new__(BatchEvaluationRunner) + runner.region = "us-east-1" + runner.data_plane_client = MagicMock() + runner._logs_client = MagicMock() + return runner + + +# --------------------------------------------------------------------------- +# BatchEvaluationRunConfig validators (#13) +# --------------------------------------------------------------------------- + + +def test_run_config_polling_timeout_must_exceed_interval(): + # Equal counts as invalid (validator uses <=) + with pytest.raises(ValidationError, match="polling_timeout_seconds"): + BatchEvaluationRunConfig( + batch_evaluation_name="x", + evaluator_config=BatchEvaluatorConfig(evaluator_ids=["Builtin.Helpfulness"]), + data_source=_make_cw_source(), + polling_timeout_seconds=5, + polling_interval_seconds=5, + ) + + +def test_run_config_max_concurrent_must_be_positive(): + with pytest.raises(ValidationError, match="max_concurrent_scenarios"): + BatchEvaluationRunConfig( + batch_evaluation_name="x", + evaluator_config=BatchEvaluatorConfig(evaluator_ids=["Builtin.Helpfulness"]), + data_source=_make_cw_source(), + max_concurrent_scenarios=0, + polling_timeout_seconds=60, + polling_interval_seconds=5, + ) + + +# --------------------------------------------------------------------------- +# CloudWatchDataSourceConfig.pre_evaluation_run_hook (#12) +# --------------------------------------------------------------------------- + + +@patch("time.sleep") +def test_pre_evaluation_run_hook_sleeps_when_delay_nonzero(mock_sleep): + source = CloudWatchDataSourceConfig( + service_names=["svc"], + log_group_names=["/aws/logs"], + 
ingestion_delay_seconds=30, + ) + source.pre_evaluation_run_hook() + mock_sleep.assert_called_once_with(30) + + +@patch("time.sleep") +def test_pre_evaluation_run_hook_skips_sleep_when_delay_zero(mock_sleep): + source = CloudWatchDataSourceConfig( + service_names=["svc"], + log_group_names=["/aws/logs"], + ingestion_delay_seconds=0, + ) + source.pre_evaluation_run_hook() + mock_sleep.assert_not_called() + + +# --------------------------------------------------------------------------- +# BatchEvaluationRunner.run_dataset_evaluation() (#11) +# --------------------------------------------------------------------------- + + +def test_run_empty_dataset_raises(): + runner = _make_runner() + # Bypass Dataset validator by using a MagicMock with empty scenarios + empty_dataset = MagicMock() + empty_dataset.scenarios = [] + with pytest.raises(ValueError, match="at least one scenario"): + runner.run_dataset_evaluation(config=_make_config(), dataset=empty_dataset, agent_invoker=_make_invoker()) + + +def test_run_all_scenarios_fail_raises(): + runner = _make_runner() + with patch.object( + runner, + "_execute_scenarios_parallel", + return_value=( + [], + [ + MagicMock(scenario_id="s1"), + ], + ), + ): + with pytest.raises(ValueError, match="All 1 scenarios failed"): + runner.run_dataset_evaluation(config=_make_config(), dataset=_DATASET, agent_invoker=_make_invoker()) + + +def test_run_happy_path_returns_result(): + runner = _make_runner() + runner.data_plane_client.start_batch_evaluation.return_value = _make_start_response() + runner.data_plane_client.get_batch_evaluation.return_value = _make_completed_response() + + with patch.object( + runner, + "_execute_scenarios_parallel", + return_value=( + [ + MagicMock( + scenario_id="s1", + session_id="s1-session-abc", + start_time=_T0, + end_time=_T1, + ground_truth=None, + ) + ], + [], + ), + ): + result = runner.run_dataset_evaluation(config=_make_config(), dataset=_DATASET, agent_invoker=_make_invoker()) + + assert 
result.batch_evaluation_id == "eval-001" + assert result.status == "COMPLETED" + assert result.agent_invocation_failures == [] + + +def test_run_partial_failure_included_in_result(): + from bedrock_agentcore.evaluation.runner.batch.batch_evaluation_models import FailedScenario + + runner = _make_runner() + runner.data_plane_client.start_batch_evaluation.return_value = _make_start_response() + runner.data_plane_client.get_batch_evaluation.return_value = _make_completed_response() + + failed = FailedScenario(scenario_id="s2", error_message="timeout") + + with patch.object( + runner, + "_execute_scenarios_parallel", + return_value=( + [MagicMock(scenario_id="s1", session_id="s1-session-abc", start_time=_T0, end_time=_T1, ground_truth=None)], + [failed], + ), + ): + result = runner.run_dataset_evaluation(config=_make_config(), dataset=_DATASET, agent_invoker=_make_invoker()) + + assert len(result.agent_invocation_failures) == 1 + assert result.agent_invocation_failures[0].scenario_id == "s2" + + +def test_run_parses_output_data_config(): + runner = _make_runner() + runner.data_plane_client.start_batch_evaluation.return_value = _make_start_response() + runner.data_plane_client.get_batch_evaluation.return_value = _make_completed_response(with_output_data=True) + + with patch.object( + runner, + "_execute_scenarios_parallel", + return_value=( + [MagicMock(scenario_id="s1", session_id="s1-session-abc", start_time=_T0, end_time=_T1, ground_truth=None)], + [], + ), + ): + result = runner.run_dataset_evaluation(config=_make_config(), dataset=_DATASET, agent_invoker=_make_invoker()) + + assert result.output_data_config is not None + assert result.output_data_config.log_group_name == "/aws/agentcore/evaluation/results" + assert result.output_data_config.log_stream_name == "batch-eval-001" + + +def test_run_parses_evaluation_results(): + runner = _make_runner() + runner.data_plane_client.start_batch_evaluation.return_value = _make_start_response() + 
runner.data_plane_client.get_batch_evaluation.return_value = _make_completed_response(with_eval_results=True) + + with patch.object( + runner, + "_execute_scenarios_parallel", + return_value=( + [MagicMock(scenario_id="s1", session_id="s1-session-abc", start_time=_T0, end_time=_T1, ground_truth=None)], + [], + ), + ): + result = runner.run_dataset_evaluation(config=_make_config(), dataset=_DATASET, agent_invoker=_make_invoker()) + + assert result.evaluation_results is not None + assert result.evaluation_results.number_of_sessions_completed == 1 + assert result.evaluation_results.total_number_of_sessions == 1 + assert len(result.evaluation_results.evaluator_summaries) == 1 + summary = result.evaluation_results.evaluator_summaries[0] + assert summary.evaluator_id == "Builtin.Helpfulness" + assert summary.statistics.average_score == 0.9 + + +def test_run_start_batch_evaluation_failure_raises_runtime_error(): + runner = _make_runner() + runner.data_plane_client.start_batch_evaluation.side_effect = Exception("service unavailable") + + with patch.object( + runner, + "_execute_scenarios_parallel", + return_value=( + [MagicMock(scenario_id="s1", session_id="s1-session-abc", start_time=_T0, end_time=_T1, ground_truth=None)], + [], + ), + ): + with pytest.raises(RuntimeError, match="StartBatchEvaluation failed"): + runner.run_dataset_evaluation(config=_make_config(), dataset=_DATASET, agent_invoker=_make_invoker()) + + +def test_run_polling_failed_status_returns_result(): + runner = _make_runner() + runner.data_plane_client.start_batch_evaluation.return_value = _make_start_response() + runner.data_plane_client.get_batch_evaluation.return_value = { + **_make_completed_response(), + "status": "FAILED", + "errorDetails": ["Internal error"], + } + + with patch.object( + runner, + "_execute_scenarios_parallel", + return_value=( + [MagicMock(scenario_id="s1", session_id="s1-session-abc", start_time=_T0, end_time=_T1, ground_truth=None)], + [], + ), + ): + result = 
runner.run_dataset_evaluation(config=_make_config(), dataset=_DATASET, agent_invoker=_make_invoker()) + assert result.status == "FAILED" + assert result.error_details == ["Internal error"] + + +def test_run_polling_stopped_status_returns_result(): + runner = _make_runner() + runner.data_plane_client.start_batch_evaluation.return_value = _make_start_response() + runner.data_plane_client.get_batch_evaluation.return_value = { + **_make_completed_response(), + "status": "STOPPED", + } + + with patch.object( + runner, + "_execute_scenarios_parallel", + return_value=( + [MagicMock(scenario_id="s1", session_id="s1-session-abc", start_time=_T0, end_time=_T1, ground_truth=None)], + [], + ), + ): + result = runner.run_dataset_evaluation(config=_make_config(), dataset=_DATASET, agent_invoker=_make_invoker()) + assert result.status == "STOPPED" + + +def test_run_polling_deleting_status_returns_result(): + runner = _make_runner() + runner.data_plane_client.start_batch_evaluation.return_value = _make_start_response() + runner.data_plane_client.get_batch_evaluation.return_value = { + **_make_completed_response(), + "status": "DELETING", + } + + with patch.object( + runner, + "_execute_scenarios_parallel", + return_value=( + [MagicMock(scenario_id="s1", session_id="s1-session-abc", start_time=_T0, end_time=_T1, ground_truth=None)], + [], + ), + ): + result = runner.run_dataset_evaluation(config=_make_config(), dataset=_DATASET, agent_invoker=_make_invoker()) + assert result.status == "DELETING" + + +def test_run_polling_completed_with_errors_status_returns_result(): + runner = _make_runner() + runner.data_plane_client.start_batch_evaluation.return_value = _make_start_response() + runner.data_plane_client.get_batch_evaluation.return_value = { + **_make_completed_response(), + "status": "COMPLETED_WITH_ERRORS", + "errorDetails": ["Some sessions failed"], + } + + with patch.object( + runner, + "_execute_scenarios_parallel", + return_value=( + [MagicMock(scenario_id="s1", 
session_id="s1-session-abc", start_time=_T0, end_time=_T1, ground_truth=None)], + [], + ), + ): + result = runner.run_dataset_evaluation(config=_make_config(), dataset=_DATASET, agent_invoker=_make_invoker()) + assert result.status == "COMPLETED_WITH_ERRORS" + assert result.error_details == ["Some sessions failed"] + + +def test_poll_for_results_timeout_raises_timeout_error(): + # Test _poll_for_results directly to avoid logger.info calls in run() + # consuming time.time() values before start_time is captured. + runner = _make_runner() + runner.data_plane_client.get_batch_evaluation.return_value = { + **_make_completed_response(), + "status": "IN_PROGRESS", + } + + _call_count = 0 + + def _time(): + nonlocal _call_count + _call_count += 1 + return 0.0 if _call_count == 1 else 10.0 + + with patch("bedrock_agentcore.evaluation.runner.batch.batch_evaluation_runner.time.time", side_effect=_time): + with patch("bedrock_agentcore.evaluation.runner.batch.batch_evaluation_runner.time.sleep"): + with pytest.raises(TimeoutError, match="Polling timeout exceeded"): + runner._poll_for_results("eval-001", timeout=6, poll_interval=5) + + +def test_run_cloudwatch_source_passes_session_ids(): + """Runner passes session_ids to CloudWatchDataSourceConfig.""" + runner = _make_runner() + runner.data_plane_client.start_batch_evaluation.return_value = _make_start_response() + runner.data_plane_client.get_batch_evaluation.return_value = _make_completed_response() + + with patch.object( + runner, + "_execute_scenarios_parallel", + return_value=( + [MagicMock(scenario_id="s1", session_id="s1-session-abc", start_time=_T0, end_time=_T1, ground_truth=None)], + [], + ), + ): + runner.run_dataset_evaluation( + config=_make_config(source=_make_cw_source()), dataset=_DATASET, agent_invoker=_make_invoker() + ) + + call_kwargs = runner.data_plane_client.start_batch_evaluation.call_args.kwargs + filter_config = call_kwargs["dataSourceConfig"]["cloudWatchLogs"]["filterConfig"] + assert 
filter_config["sessionIds"] == ["s1-session-abc"] + + +# --------------------------------------------------------------------------- +# fetch_evaluation_events (#11) +# --------------------------------------------------------------------------- + + +def test_fetch_evaluation_events_raises_when_no_output_data_config(): + runner = _make_runner() + result = MagicMock() + result.output_data_config = None + result.batch_evaluation_id = "eval-001" + + with pytest.raises(ValueError, match="No output_data_config"): + runner.fetch_evaluation_events(result) + + +def test_fetch_evaluation_events_returns_parsed_events(): + runner = _make_runner() + result = MagicMock() + result.output_data_config = CloudWatchOutputDataConfig( + log_group_name="/aws/agentcore/evaluation/results", + log_stream_name="batch-eval-abc123", + ) + runner._logs_client.get_log_events.side_effect = [ + {"events": [{"message": '{"traceId": "t1"}'}], "nextForwardToken": "tok-1"}, + {"events": [], "nextForwardToken": "tok-1"}, + ] + + events = runner.fetch_evaluation_events(result) + + assert events == [{"traceId": "t1"}] + + +# --------------------------------------------------------------------------- +# _transform_ground_truth +# --------------------------------------------------------------------------- + + +def test_transform_ground_truth_returns_none_when_no_fields(): + # Use base Scenario (no turns/expected_trajectory fields) with no assertions + from bedrock_agentcore.evaluation.runner.dataset_types import Scenario + + runner = _make_runner() + scenario = Scenario(scenario_id="s1") + assert runner._transform_ground_truth(scenario) is None + + +def test_transform_ground_truth_assertions_only(): + from bedrock_agentcore.evaluation.runner.dataset_types import Scenario + + runner = _make_runner() + scenario = Scenario(scenario_id="s1", assertions=["Be concise", "Be helpful"]) + gt = runner._transform_ground_truth(scenario) + assert gt == {"assertions": [{"text": "Be concise"}, {"text": "Be helpful"}]} + 
+ +def test_transform_ground_truth_expected_trajectory(): + runner = _make_runner() + scenario = PredefinedScenario( + scenario_id="s1", + turns=[Turn(input="hi")], + expected_trajectory=["tool_a", "tool_b"], + ) + gt = runner._transform_ground_truth(scenario) + assert gt["expectedTrajectory"] == {"toolNames": ["tool_a", "tool_b"]} + + +def test_transform_ground_truth_turns_with_expected_response(): + runner = _make_runner() + scenario = PredefinedScenario( + scenario_id="s1", + turns=[Turn(input="hi", expected_response="hello")], + ) + gt = runner._transform_ground_truth(scenario) + assert gt["turns"] == [{"input": {"prompt": "hi"}, "expectedResponse": {"text": "hello"}}] + + +def test_transform_ground_truth_turns_without_expected_response(): + runner = _make_runner() + scenario = PredefinedScenario( + scenario_id="s1", + turns=[Turn(input="hi")], + ) + gt = runner._transform_ground_truth(scenario) + assert gt["turns"] == [{"input": {"prompt": "hi"}}] + assert "expectedResponse" not in gt["turns"][0] + + +def test_transform_ground_truth_all_fields(): + runner = _make_runner() + scenario = PredefinedScenario( + scenario_id="s1", + turns=[Turn(input="hi", expected_response="hello")], + assertions=["Be helpful"], + expected_trajectory=["tool_a"], + ) + gt = runner._transform_ground_truth(scenario) + assert "assertions" in gt + assert "expectedTrajectory" in gt + assert "turns" in gt + + +def test_transform_ground_truth_simulated_no_assertions(): + runner = _make_runner() + scenario = SimulatedScenario( + scenario_id="sim-1", + scenario_description="Customer orders pizza", + actor_profile=ActorProfile(context="A customer", goal="Order pizza"), + input="Hello", + ) + assert runner._transform_ground_truth(scenario) is None + + +def test_transform_ground_truth_simulated_with_assertions(): + runner = _make_runner() + scenario = SimulatedScenario( + scenario_id="sim-2", + scenario_description="Customer orders pizza", + actor_profile=ActorProfile(context="A customer", 
goal="Order pizza"), + input="Hello", + assertions=["Must confirm order", "Must be polite"], + ) + gt = runner._transform_ground_truth(scenario) + assert gt == {"assertions": [{"text": "Must confirm order"}, {"text": "Must be polite"}]} + assert "turns" not in gt + assert "expectedTrajectory" not in gt + + +# --------------------------------------------------------------------------- +# _execute_scenario and _poll_for_results edge cases +# --------------------------------------------------------------------------- + + +def test_execute_scenario_unsupported_type_raises_type_error(): + from bedrock_agentcore.evaluation.runner.dataset_types import Scenario + + runner = _make_runner() + + class _CustomScenario(Scenario): + pass + + scenario = _CustomScenario(scenario_id="s1") + from bedrock_agentcore.evaluation.runner.batch.batch_evaluation_models import BatchEvaluationRunConfig + + config = BatchEvaluationRunConfig.__new__(BatchEvaluationRunConfig) + with pytest.raises(TypeError, match="Unsupported scenario type"): + runner._execute_scenario(config, scenario, _make_invoker()) + + +def test_poll_for_results_get_batch_evaluation_failure_raises_runtime_error(): + runner = _make_runner() + runner.data_plane_client.get_batch_evaluation.side_effect = Exception("network error") + + with pytest.raises(RuntimeError, match="Failed to get batch evaluation result"): + runner._poll_for_results("eval-001", timeout=60, poll_interval=5) + + +def test_poll_for_results_unknown_status_raises_runtime_error(): + runner = _make_runner() + runner.data_plane_client.get_batch_evaluation.return_value = { + **_make_completed_response(), + "status": "UNKNOWN_STATE", + } + + with pytest.raises(RuntimeError, match="Unknown batch evaluation status"): + runner._poll_for_results("eval-001", timeout=60, poll_interval=5) + + +# --------------------------------------------------------------------------- +# fetch_evaluation_events: CloudWatch pagination (migrated from results reader) +# 
--------------------------------------------------------------------------- + +_CW_OUTPUT_CONFIG = CloudWatchOutputDataConfig( + log_group_name="/aws/agentcore/evaluation/results", + log_stream_name="batch-eval-abc123", +) + + +def _make_log_event(message: str) -> dict: + return {"timestamp": 1000, "message": message, "ingestionTime": 1000} + + +def _make_fetch_result(output_data_config): + result = MagicMock() + result.output_data_config = output_data_config + result.batch_evaluation_id = "eval-001" + return result + + +def test_fetch_evaluation_events_paginates_multiple_pages(): + runner = _make_runner() + runner._logs_client.get_log_events.side_effect = [ + {"events": [_make_log_event('{"page": 1}')], "nextForwardToken": "tok-1"}, + {"events": [_make_log_event('{"page": 2}')], "nextForwardToken": "tok-2"}, + {"events": [], "nextForwardToken": "tok-2"}, + ] + + events = runner.fetch_evaluation_events(_make_fetch_result(_CW_OUTPUT_CONFIG)) + + assert len(events) == 2 + assert events[0]["page"] == 1 + assert events[1]["page"] == 2 + + +def test_fetch_evaluation_events_skips_non_json(caplog): + import logging + + runner = _make_runner() + runner._logs_client.get_log_events.side_effect = [ + { + "events": [ + _make_log_event("not json at all"), + _make_log_event('{"traceId": "t1"}'), + ], + "nextForwardToken": "tok-1", + }, + {"events": [], "nextForwardToken": "tok-1"}, + ] + + with caplog.at_level(logging.WARNING): + events = runner.fetch_evaluation_events(_make_fetch_result(_CW_OUTPUT_CONFIG)) + + assert len(events) == 1 + assert events[0]["traceId"] == "t1" + assert "Skipping non-JSON" in caplog.text + + +def test_fetch_evaluation_events_empty_stream(): + runner = _make_runner() + runner._logs_client.get_log_events.side_effect = [ + {"events": [], "nextForwardToken": "tok-0"}, + {"events": [], "nextForwardToken": "tok-0"}, + ] + + events = runner.fetch_evaluation_events(_make_fetch_result(_CW_OUTPUT_CONFIG)) + + assert events == [] + + +def 
test_fetch_evaluation_events_start_from_head_only_on_first_call(): + runner = _make_runner() + runner._logs_client.get_log_events.side_effect = [ + {"events": [_make_log_event('{"page": 1}')], "nextForwardToken": "tok-1"}, + {"events": [], "nextForwardToken": "tok-1"}, + ] + + runner.fetch_evaluation_events(_make_fetch_result(_CW_OUTPUT_CONFIG)) + + calls = runner._logs_client.get_log_events.call_args_list + assert len(calls) == 2 + assert "startFromHead" in calls[0][1] + assert "startFromHead" not in calls[1][1] + assert calls[1][1]["nextToken"] == "tok-1" + + +def test_fetch_evaluation_events_raises_lookup_error_when_stream_not_found(): + runner = _make_runner() + + class ResourceNotFoundException(Exception): + pass + + runner._logs_client.exceptions.ResourceNotFoundException = ResourceNotFoundException + runner._logs_client.get_log_events.side_effect = ResourceNotFoundException("stream not found") + + with pytest.raises(LookupError, match="CloudWatch log stream not found"): + runner.fetch_evaluation_events(_make_fetch_result(_CW_OUTPUT_CONFIG)) diff --git a/tests/bedrock_agentcore/evaluation/runner/on_demand/test_runner.py b/tests/bedrock_agentcore/evaluation/runner/on_demand/test_runner.py index dede005e..79306e10 100644 --- a/tests/bedrock_agentcore/evaluation/runner/on_demand/test_runner.py +++ b/tests/bedrock_agentcore/evaluation/runner/on_demand/test_runner.py @@ -1,15 +1,19 @@ """Tests for the OnDemandEvaluationDatasetRunner and related types.""" -from unittest.mock import MagicMock, patch +from datetime import datetime, timezone +from unittest.mock import ANY, MagicMock, patch from bedrock_agentcore.evaluation.agent_span_collector import ( AgentSpanCollector, CloudWatchAgentSpanCollector, ) from bedrock_agentcore.evaluation.runner.dataset_types import ( + ActorProfile, Dataset, PredefinedScenario, Scenario, + SimulatedScenario, + SimulationConfig, Turn, ) from bedrock_agentcore.evaluation.runner.invoker_types import ( @@ -74,6 +78,19 @@ def 
test_custom_max_concurrent_scenarios(self): ) assert cfg.max_concurrent_scenarios == 10 + def test_simulation_config_defaults_to_none(self): + cfg = EvaluationRunConfig(evaluator_config=EvaluatorConfig(evaluator_ids=["accuracy"])) + assert cfg.simulation_config is None + + def test_simulation_config_accepted(self): + sim = SimulationConfig(model_id="us.amazon.nova-lite-v1:0") + cfg = EvaluationRunConfig( + evaluator_config=EvaluatorConfig(evaluator_ids=["accuracy"]), + simulation_config=sim, + ) + assert cfg.simulation_config is sim + assert cfg.simulation_config.model_id == "us.amazon.nova-lite-v1:0" + # --- Result dataclass tests --- @@ -932,3 +949,209 @@ class CustomScenario(Scenario): assert len(result.scenario_results) == 1 assert result.scenario_results[0].status == "FAILED" assert "No runner registered" in result.scenario_results[0].error + + +# --- _run_scenario dispatch tests --- + + +def _make_completed_exec_result(scenario_id: str) -> ScenarioExecutionResult: + now = datetime.now(timezone.utc) + return ScenarioExecutionResult( + scenario_id=scenario_id, + session_id=f"{scenario_id}-session", + start_time=now, + end_time=now, + status="COMPLETED", + ) + + +def _make_simulated_scenario() -> SimulatedScenario: + return SimulatedScenario( + scenario_id="sim-1", + actor_profile=ActorProfile(context="A customer", goal="Place an order"), + input="I'd like to order a pizza", + ) + + +class TestRunScenarioDispatch: + """Unit tests for _run_scenario — executor selection and kwarg threading. + + The runner looks up executor classes from its _scenario_executors dict (populated at + __init__), so module-level patching doesn't intercept the calls. Instead we inject + mock classes directly into the dict after construction. 
+ """ + + @patch("bedrock_agentcore.evaluation.runner.on_demand.on_demand_runner.boto3") + def test_predefined_scenario_uses_predefined_executor(self, mock_boto3): + mock_boto3.client.side_effect = [MagicMock(), MagicMock()] + runner = OnDemandEvaluationDatasetRunner() + + mock_executor_cls = MagicMock() + mock_executor_cls.return_value.run_scenario.return_value = _make_completed_exec_result("p1") + runner._scenario_executors[PredefinedScenario] = mock_executor_cls + + scenario = PredefinedScenario(scenario_id="p1", turns=[Turn(input="hi")]) + config = EvaluationRunConfig( + evaluator_config=EvaluatorConfig(evaluator_ids=["e1"]), + evaluation_delay_seconds=0, + ) + + runner._run_scenario(config, scenario, lambda inp: AgentInvokerOutput(agent_output="ok")) + + mock_executor_cls.assert_called_once_with(agent_invoker=ANY) + call_kwargs = mock_executor_cls.call_args[1] + assert "simulation_config" not in call_kwargs + + @patch("bedrock_agentcore.evaluation.runner.on_demand.on_demand_runner.boto3") + def test_simulated_scenario_uses_simulated_executor(self, mock_boto3): + mock_boto3.client.side_effect = [MagicMock(), MagicMock()] + runner = OnDemandEvaluationDatasetRunner() + + mock_executor_cls = MagicMock() + mock_executor_cls.return_value.run_scenario.return_value = _make_completed_exec_result("sim-1") + runner._scenario_executors[SimulatedScenario] = mock_executor_cls + + scenario = _make_simulated_scenario() + sim_config = SimulationConfig(model_id="us.amazon.nova-lite-v1:0") + config = EvaluationRunConfig( + evaluator_config=EvaluatorConfig(evaluator_ids=["e1"]), + evaluation_delay_seconds=0, + simulation_config=sim_config, + ) + + runner._run_scenario(config, scenario, lambda inp: AgentInvokerOutput(agent_output="ok")) + + mock_executor_cls.assert_called_once_with(agent_invoker=ANY, simulation_config=sim_config) + + @patch("bedrock_agentcore.evaluation.runner.on_demand.on_demand_runner.boto3") + def 
test_simulated_scenario_passes_none_simulation_config_when_absent(self, mock_boto3): + mock_boto3.client.side_effect = [MagicMock(), MagicMock()] + runner = OnDemandEvaluationDatasetRunner() + + mock_executor_cls = MagicMock() + mock_executor_cls.return_value.run_scenario.return_value = _make_completed_exec_result("sim-1") + runner._scenario_executors[SimulatedScenario] = mock_executor_cls + + scenario = _make_simulated_scenario() + config = EvaluationRunConfig( + evaluator_config=EvaluatorConfig(evaluator_ids=["e1"]), + evaluation_delay_seconds=0, + # simulation_config omitted — defaults to None + ) + + runner._run_scenario(config, scenario, lambda inp: AgentInvokerOutput(agent_output="ok")) + + mock_executor_cls.assert_called_once_with(agent_invoker=ANY, simulation_config=None) + + @patch("bedrock_agentcore.evaluation.runner.on_demand.on_demand_runner.boto3") + def test_predefined_scenario_ignores_simulation_config(self, mock_boto3): + """simulation_config on the run config must not be forwarded to PredefinedScenarioExecutor.""" + mock_boto3.client.side_effect = [MagicMock(), MagicMock()] + runner = OnDemandEvaluationDatasetRunner() + + mock_executor_cls = MagicMock() + mock_executor_cls.return_value.run_scenario.return_value = _make_completed_exec_result("p1") + runner._scenario_executors[PredefinedScenario] = mock_executor_cls + + scenario = PredefinedScenario(scenario_id="p1", turns=[Turn(input="hi")]) + config = EvaluationRunConfig( + evaluator_config=EvaluatorConfig(evaluator_ids=["e1"]), + evaluation_delay_seconds=0, + simulation_config=SimulationConfig(model_id="some-model"), + ) + + runner._run_scenario(config, scenario, lambda inp: AgentInvokerOutput(agent_output="ok")) + + call_kwargs = mock_executor_cls.call_args[1] + assert "simulation_config" not in call_kwargs + + +# --- End-to-end: SimulatedScenario in run() --- + + +class TestOnDemandRunnerWithSimulatedScenario: + """Integration-style tests for run() when the dataset contains SimulatedScenario 
entries.""" + + def _make_config(self, simulation_config=None): + return EvaluationRunConfig( + evaluator_config=EvaluatorConfig(evaluator_ids=["accuracy"]), + evaluation_delay_seconds=0, + simulation_config=simulation_config, + ) + + @patch("bedrock_agentcore.evaluation.runner.on_demand.on_demand_runner.boto3") + def test_simulated_scenario_completes_end_to_end(self, mock_boto3): + """A simulated scenario runs through the full runner pipeline.""" + mock_dp_client = MagicMock() + mock_cp_client = MagicMock() + mock_boto3.client.side_effect = [mock_dp_client, mock_cp_client] + + mock_dp_client.evaluate.return_value = {"evaluationResults": [{"value": 0.9, "explanation": "Good"}]} + mock_cp_client.get_evaluator.return_value = {"level": "SESSION"} + + mock_collector = MagicMock(spec=AgentSpanCollector) + mock_collector.collect.return_value = [] + + dataset = Dataset(scenarios=[_make_simulated_scenario()]) + config = self._make_config() + + mock_sim_exec_cls = MagicMock() + mock_sim_exec_cls.return_value.run_scenario.return_value = _make_completed_exec_result("sim-1") + + runner = OnDemandEvaluationDatasetRunner(region="us-west-2") + runner._scenario_executors[SimulatedScenario] = mock_sim_exec_cls + + result = runner.run( + config=config, + dataset=dataset, + agent_invoker=lambda inp: AgentInvokerOutput(agent_output="ok"), + span_collector=mock_collector, + ) + + assert len(result.scenario_results) == 1 + sr = result.scenario_results[0] + assert sr.scenario_id == "sim-1" + assert sr.status == "COMPLETED" + assert sr.error is None + + @patch("bedrock_agentcore.evaluation.runner.on_demand.on_demand_runner.boto3") + def test_mixed_dataset_dispatches_each_executor_correctly(self, mock_boto3): + """Dataset with both PredefinedScenario and SimulatedScenario uses the right executor per type.""" + mock_dp_client = MagicMock() + mock_cp_client = MagicMock() + mock_boto3.client.side_effect = [mock_dp_client, mock_cp_client] + + mock_dp_client.evaluate.return_value = 
{"evaluationResults": [{"value": 1.0, "explanation": "Ok"}]} + mock_cp_client.get_evaluator.return_value = {"level": "SESSION"} + + mock_collector = MagicMock(spec=AgentSpanCollector) + mock_collector.collect.return_value = [] + + predefined = PredefinedScenario(scenario_id="pred-1", turns=[Turn(input="hello")]) + simulated = _make_simulated_scenario() + dataset = Dataset(scenarios=[predefined, simulated]) + sim_config = SimulationConfig(model_id="us.amazon.nova-lite-v1:0") + config = self._make_config(simulation_config=sim_config) + + mock_pred_cls = MagicMock() + mock_pred_cls.return_value.run_scenario.return_value = _make_completed_exec_result("pred-1") + mock_sim_cls = MagicMock() + mock_sim_cls.return_value.run_scenario.return_value = _make_completed_exec_result("sim-1") + + runner = OnDemandEvaluationDatasetRunner(region="us-west-2") + runner._scenario_executors[PredefinedScenario] = mock_pred_cls + runner._scenario_executors[SimulatedScenario] = mock_sim_cls + + result = runner.run( + config=config, + dataset=dataset, + agent_invoker=lambda inp: AgentInvokerOutput(agent_output="ok"), + span_collector=mock_collector, + ) + + # Each executor called exactly once for its scenario type + mock_pred_cls.assert_called_once_with(agent_invoker=ANY) + mock_sim_cls.assert_called_once_with(agent_invoker=ANY, simulation_config=sim_config) + + assert len(result.scenario_results) == 2 + assert all(sr.status == "COMPLETED" for sr in result.scenario_results) diff --git a/tests/bedrock_agentcore/evaluation/runner/test_simulated_scenario_executor.py b/tests/bedrock_agentcore/evaluation/runner/test_simulated_scenario_executor.py new file mode 100644 index 00000000..e74ae257 --- /dev/null +++ b/tests/bedrock_agentcore/evaluation/runner/test_simulated_scenario_executor.py @@ -0,0 +1,751 @@ +"""Unit tests for SimulatedScenario types, SimulationConfig, and SimulatedScenarioExecutor.""" + +import builtins +import json +import sys +from unittest.mock import MagicMock, patch + +import 
pytest +from pydantic import BaseModel, ValidationError + +from bedrock_agentcore.evaluation.runner.dataset_types import ActorProfile, SimulatedScenario, SimulationConfig +from bedrock_agentcore.evaluation.runner.invoker_types import AgentInvokerOutput +from bedrock_agentcore.evaluation.runner.prompts import render_template_file, render_template_string +from bedrock_agentcore.evaluation.runner.scenario_executor import ( + SimulatedScenarioExecutor, + _build_payload, + _extract_agent_output, + _make_response_model, + _render_system_prompt, + _to_string, +) + +# --------------------------------------------------------------------------- +# Shared Pydantic models used across tests +# --------------------------------------------------------------------------- + + +class OrderRequest(BaseModel): + item: str + quantity: int + + +class OrderConfirmation(BaseModel): + order_id: str + status: str + + +# --------------------------------------------------------------------------- +# Fixtures +# --------------------------------------------------------------------------- + + +@pytest.fixture +def actor_profile(): + return ActorProfile( + traits={"expertise": "novice", "tone": "friendly"}, + context="A customer ordering food online", + goal="Place a pizza order successfully", + ) + + +@pytest.fixture +def simulated_scenario(actor_profile): + return SimulatedScenario( + scenario_id="sim-1", + scenario_description="Customer orders pizza", + actor_profile=actor_profile, + input={"item": "pizza", "quantity": 2}, + max_turns=5, + ) + + +@pytest.fixture +def mock_strands(): + """Patch strands imports used by AgentCoreActorSimulator and SimulatedScenarioExecutor.""" + mock_agent_instance = MagicMock() + mock_agent_cls = MagicMock(return_value=mock_agent_instance) + mock_profile_cls = MagicMock() + mock_goal_tool = MagicMock() + + modules = { + "strands": MagicMock(Agent=mock_agent_cls), + "strands_evals": MagicMock(), + "strands_evals.simulation": MagicMock(), + 
"strands_evals.simulation.tools": MagicMock(), + "strands_evals.simulation.tools.goal_completion": MagicMock(get_conversation_goal_completion=mock_goal_tool), + "strands_evals.types": MagicMock(), + "strands_evals.types.simulation": MagicMock(ActorProfile=mock_profile_cls), + } + with patch.dict(sys.modules, modules): + yield mock_agent_cls, mock_agent_instance, mock_profile_cls + + +def _make_actor_side_effect(messages, stops=None): + """Build a side_effect for mock_agent_instance that cycles through messages/stops. + + Each call to the mock agent (i.e. each simulator.act() call) returns a + MagicMock whose structured_output mimics SimulatorActorResponse fields. + """ + if stops is None: + # Default: all non-stop until the last message, then stop + stops = [False] * (len(messages) - 1) + [True] if messages else [True] + + call_count = {"n": 0} + + def side_effect(agent_msg, structured_output_model=None, **kwargs): + idx = call_count["n"] + call_count["n"] += 1 + result = MagicMock() + result.structured_output.message = messages[idx] if idx < len(messages) else None + result.structured_output.stop = stops[idx] if idx < len(stops) else True + result.structured_output.reasoning = "test reasoning" + return result + + return side_effect + + +# --------------------------------------------------------------------------- +# TestActorProfile +# --------------------------------------------------------------------------- + + +class TestActorProfile: + def test_construction_with_all_fields(self): + profile = ActorProfile( + traits={"expertise": "expert"}, + context="A senior engineer", + goal="Debug a production issue", + ) + assert profile.traits == {"expertise": "expert"} + assert profile.context == "A senior engineer" + assert profile.goal == "Debug a production issue" + + def test_traits_defaults_to_empty_dict(self): + profile = ActorProfile(context="Some context", goal="Some goal") + assert profile.traits == {} + + def test_context_required(self): + with 
pytest.raises(ValidationError): + ActorProfile(goal="Some goal") + + def test_goal_required(self): + with pytest.raises(ValidationError): + ActorProfile(context="Some context") + + +# --------------------------------------------------------------------------- +# TestSimulatedScenario +# --------------------------------------------------------------------------- + + +class TestSimulatedScenario: + def test_construction(self, actor_profile): + scenario = SimulatedScenario( + scenario_id="s1", + scenario_description="desc", + actor_profile=actor_profile, + input="hello", + ) + assert scenario.scenario_id == "s1" + assert scenario.max_turns == 10 # default + + def test_actor_profile_must_be_actor_profile_type(self): + with pytest.raises(ValidationError): + SimulatedScenario( + scenario_id="s1", + scenario_description="desc", + actor_profile="plain string", + input="hello", + ) + + def test_max_turns_must_be_at_least_one(self, actor_profile): + with pytest.raises(ValidationError): + SimulatedScenario( + scenario_id="s1", + scenario_description="desc", + actor_profile=actor_profile, + input="hello", + max_turns=0, + ) + + def test_input_accepts_dict(self, actor_profile): + scenario = SimulatedScenario( + scenario_id="s1", + scenario_description="desc", + actor_profile=actor_profile, + input={"key": "value"}, + ) + assert scenario.input == {"key": "value"} + + +# --------------------------------------------------------------------------- +# TestSimulationConfig +# --------------------------------------------------------------------------- + + +class TestSimulationConfig: + def test_all_fields_optional(self): + config = SimulationConfig() + assert config.model_id is None + assert config.system_prompt_template is None + assert config.input_type is None + assert config.output_type is None + + def test_accepts_pydantic_model_classes(self): + config = SimulationConfig(input_type=OrderRequest, output_type=OrderConfirmation) + assert config.input_type is OrderRequest + assert 
config.output_type is OrderConfirmation + + def test_accepts_model_id_and_template(self): + config = SimulationConfig( + model_id="us.amazon.nova-lite-v1:0", + system_prompt_template="You are: {{ actor_profile }}", + ) + assert config.model_id == "us.amazon.nova-lite-v1:0" + + +# --------------------------------------------------------------------------- +# TestToString +# --------------------------------------------------------------------------- + + +class TestToString: + def test_str_passthrough(self): + assert _to_string("hello") == "hello" + + def test_dict_becomes_json(self): + result = _to_string({"a": 1}) + assert json.loads(result) == {"a": 1} + + def test_pydantic_model_becomes_json(self): + model = OrderRequest(item="pizza", quantity=2) + result = _to_string(model) + parsed = json.loads(result) + assert parsed["item"] == "pizza" + assert parsed["quantity"] == 2 + + def test_other_types_use_str(self): + assert _to_string(42) == "42" + + +# --------------------------------------------------------------------------- +# TestBuildFirstPayload +# --------------------------------------------------------------------------- + + +class TestBuildFirstPayload: + def test_dict_parsed_into_input_type(self): + config = SimulationConfig(input_type=OrderRequest) + result = _build_payload({"item": "pizza", "quantity": 2}, config) + assert isinstance(result, OrderRequest) + assert result.item == "pizza" + + def test_json_string_parsed_into_input_type(self): + config = SimulationConfig(input_type=OrderRequest) + result = _build_payload('{"item": "burger", "quantity": 1}', config) + assert isinstance(result, OrderRequest) + assert result.item == "burger" + + def test_invalid_json_string_raises(self): + config = SimulationConfig(input_type=OrderRequest) + with pytest.raises(ValidationError): + _build_payload("not json", config) + + def test_no_input_type_returns_dict_unchanged(self): + result = _build_payload({"item": "pizza"}, None) + assert result == {"item": "pizza"} + + 
def test_no_input_type_returns_string_unchanged(self): + result = _build_payload("hello", None) + assert result == "hello" + + def test_correct_basemodel_instance_returned_unchanged(self): + config = SimulationConfig(input_type=OrderRequest) + instance = OrderRequest(item="pizza", quantity=2) + result = _build_payload(instance, config) + assert result is instance + + def test_wrong_basemodel_compatible_fields_coerced(self): + # A different BaseModel with the same fields — should be coerced to input_type. + class AnotherRequest(BaseModel): + item: str + quantity: int + + config = SimulationConfig(input_type=OrderRequest) + result = _build_payload(AnotherRequest(item="sushi", quantity=3), config) + assert isinstance(result, OrderRequest) + assert result.item == "sushi" + + def test_wrong_basemodel_incompatible_fields_raises(self): + # OrderConfirmation has different fields — coercion should raise ValidationError. + config = SimulationConfig(input_type=OrderRequest) + with pytest.raises(ValidationError): + _build_payload(OrderConfirmation(order_id="1", status="ok"), config) + + +# --------------------------------------------------------------------------- +# TestExtractAgentOutput +# --------------------------------------------------------------------------- + + +class TestExtractAgentOutput: + def test_no_output_type_returns_string_unchanged(self): + assert _extract_agent_output("plain text", None) == "plain text" + + def test_no_output_type_serializes_dict(self): + result = _extract_agent_output({"answer": "42"}, None) + assert json.loads(result) == {"answer": "42"} + + def test_valid_output_returns_canonical_json(self): + class AgentAnswer(BaseModel): + answer: str + + result = _extract_agent_output('{"answer": "Hello world"}', AgentAnswer) + assert json.loads(result) == {"answer": "Hello world"} + + def test_valid_pydantic_instance_returns_canonical_json(self): + class AgentAnswer(BaseModel): + answer: str + + result = 
_extract_agent_output(AgentAnswer(answer="42"), AgentAnswer) + assert json.loads(result) == {"answer": "42"} + + def test_plain_text_falls_back_when_parse_fails(self): + class OrderConfirmation(BaseModel): + order_id: str + status: str + + plain = "Here is your order summary: item confirmed." + result = _extract_agent_output(plain, OrderConfirmation) + assert result == plain + + +# --------------------------------------------------------------------------- +# TestMakeResponseModel +# --------------------------------------------------------------------------- + + +class TestMakeResponseModel: + def test_no_input_type_message_is_str(self): + model_cls = _make_response_model(None) + instance = model_cls(reasoning="r", stop=False, message="hello") + assert instance.message == "hello" + + def test_with_input_type_message_is_optional_input_type(self): + model_cls = _make_response_model(OrderRequest) + order = OrderRequest(item="pizza", quantity=1) + instance = model_cls(reasoning="r", stop=False, message=order) + assert isinstance(instance.message, OrderRequest) + assert instance.message.item == "pizza" + + def test_with_input_type_message_can_be_none_on_stop(self): + model_cls = _make_response_model(OrderRequest) + instance = model_cls(reasoning="r", stop=True, message=None) + assert instance.message is None + assert instance.stop is True + + def test_stop_defaults_to_false(self): + model_cls = _make_response_model(None) + instance = model_cls(reasoning="r", message="hi") + assert instance.stop is False + + def test_reasoning_required(self): + model_cls = _make_response_model(None) + with pytest.raises(ValidationError): + model_cls(stop=False, message="hi") + + +# --------------------------------------------------------------------------- +# TestPromptTemplates +# --------------------------------------------------------------------------- + + +class TestPromptTemplates: + def test_structured_template_renders_actor_profile(self): + result = render_template_file( + 
"structured_user_simulator.j2", + actor_profile={"context": "a tester", "goal": "find bugs", "traits": {}}, + ) + assert "a tester" in result + assert "find bugs" in result + + def test_structured_template_renders_scenario_description(self): + result = render_template_file( + "structured_user_simulator.j2", + actor_profile={"context": "ctx", "goal": "goal", "traits": {}}, + scenario_description="Customer orders pizza", + ) + assert "Customer orders pizza" in result + + def test_structured_template_renders_output_schema(self): + schema = json.dumps(OrderConfirmation.model_json_schema(), indent=2) + result = render_template_file( + "structured_user_simulator.j2", + actor_profile={"context": "ctx", "goal": "goal", "traits": {}}, + output_schema=schema, + ) + assert "order_id" in result or "OrderConfirmation" in result + + def test_structured_template_no_input_schema_section(self): + """Template must not contain JSON output instructions (handled by response model).""" + result = render_template_file( + "structured_user_simulator.j2", + actor_profile={"context": "ctx", "goal": "goal", "traits": {}}, + ) + assert "MUST be a valid JSON object" not in result + assert "MANDATORY OUTPUT FORMAT" not in result + + def test_structured_template_contains_stop_instruction(self): + result = render_template_file( + "structured_user_simulator.j2", + actor_profile={"context": "ctx", "goal": "goal", "traits": {}}, + ) + assert "stop=true" in result + + def test_render_template_string_substitutes_variables(self): + template = "Hello {{ name }}, your goal is {{ goal }}." + result = render_template_string(template, name="Alice", goal="test the agent") + assert result == "Hello Alice, your goal is test the agent." 
+ + def test_render_template_string_handles_json_curly_braces(self): + """Jinja2 must not misinterpret JSON { } as template syntax.""" + template = "Schema: {{ schema }}" + schema = '{"type": "object", "properties": {"x": {"type": "string"}}}' + result = render_template_string(template, schema=schema) + assert schema in result + + def test_render_system_prompt_output_type_injected(self, mock_strands): + """output_type schema must appear in the rendered prompt.""" + _, _, mock_profile_cls = mock_strands + mock_profile = MagicMock() + mock_profile.model_dump.return_value = {"traits": {}, "context": "ctx", "actor_goal": "goal"} + + config = SimulationConfig(output_type=OrderConfirmation) + rendered = _render_system_prompt(config, mock_profile) + + assert "order_id" in rendered or "OrderConfirmation" in rendered + assert "None" not in rendered + + def test_render_system_prompt_scenario_description_injected(self, mock_strands): + mock_profile = MagicMock() + mock_profile.model_dump.return_value = {"traits": {}, "context": "ctx", "actor_goal": "goal"} + + rendered = _render_system_prompt(None, mock_profile, scenario_description="Customer orders pizza") + + assert "Customer orders pizza" in rendered + + def test_render_system_prompt_always_uses_builtin_template(self, mock_strands): + """Built-in template is always used (no external default fallback).""" + mock_profile = MagicMock() + mock_profile.model_dump.return_value = {"traits": {}, "context": "ctx", "actor_goal": "goal"} + + rendered = _render_system_prompt(None, mock_profile) + + # Built-in template always contains the stop instruction + assert "stop=true" in rendered + + def test_render_system_prompt_no_input_schema_in_output(self, mock_strands): + """input_type must NOT inject a JSON output schema into the system prompt.""" + mock_profile = MagicMock() + mock_profile.model_dump.return_value = {"traits": {}, "context": "ctx", "actor_goal": "goal"} + + config = SimulationConfig(input_type=OrderRequest) + rendered = 
_render_system_prompt(config, mock_profile) + + assert "MUST be a valid JSON object" not in rendered + assert "item" not in rendered # OrderRequest field must not appear in prompt + + +# --------------------------------------------------------------------------- +# TestSimulatedScenarioExecutor +# --------------------------------------------------------------------------- + + +class TestSimulatedScenarioExecutor: + def test_successful_single_turn(self, simulated_scenario, mock_strands): + """Actor stops after first turn — agent invoked exactly once.""" + mock_agent_cls, mock_agent_instance, _ = mock_strands + mock_agent_instance.side_effect = _make_actor_side_effect(messages=[None], stops=[True]) + + invoker_calls = [] + + def invoker(inp): + invoker_calls.append(inp) + return AgentInvokerOutput(agent_output="agent response") + + executor = SimulatedScenarioExecutor(agent_invoker=invoker) + result = executor.run_scenario(simulated_scenario) + + assert result.status == "COMPLETED" + assert result.scenario_id == "sim-1" + assert result.session_id.startswith("sim-1-") + assert result.error is None + assert result.start_time < result.end_time + assert len(invoker_calls) == 1 + + def test_successful_multi_turn(self, simulated_scenario, mock_strands): + """Actor continues for 2 turns then stops — 3 agent calls total.""" + mock_agent_cls, mock_agent_instance, _ = mock_strands + mock_agent_instance.side_effect = _make_actor_side_effect( + messages=["second msg", "third msg", None], + stops=[False, False, True], + ) + + invoker_calls = [] + + def invoker(inp): + invoker_calls.append(inp) + return AgentInvokerOutput(agent_output="agent response") + + executor = SimulatedScenarioExecutor(agent_invoker=invoker) + result = executor.run_scenario(simulated_scenario) + + assert result.status == "COMPLETED" + assert len(invoker_calls) == 3 + + def test_same_session_id_across_all_turns(self, simulated_scenario, mock_strands): + mock_agent_cls, mock_agent_instance, _ = mock_strands + 
mock_agent_instance.side_effect = _make_actor_side_effect(messages=["msg 1", None], stops=[False, True]) + + session_ids = [] + + def invoker(inp): + session_ids.append(inp.session_id) + return AgentInvokerOutput(agent_output="response") + + executor = SimulatedScenarioExecutor(agent_invoker=invoker) + executor.run_scenario(simulated_scenario) + + assert len(set(session_ids)) == 1, "session_id must be identical across all turns" + + def test_agent_invoker_failure_marks_scenario_failed(self, simulated_scenario, mock_strands): + mock_agent_cls, mock_agent_instance, _ = mock_strands + mock_agent_instance.side_effect = _make_actor_side_effect(messages=["msg"], stops=[True]) + + def failing_invoker(inp): + raise RuntimeError("agent exploded") + + executor = SimulatedScenarioExecutor(agent_invoker=failing_invoker) + result = executor.run_scenario(simulated_scenario) + + assert result.status == "FAILED" + assert "agent exploded" in result.error + + def test_actor_stops_at_max_turns(self, actor_profile, mock_strands): + """Actor never signals stop — max_turns enforces the limit.""" + mock_agent_cls, mock_agent_instance, _ = mock_strands + # Actor always returns stop=False; turn count check ends the loop. 
+ mock_agent_instance.side_effect = _make_actor_side_effect( + messages=["keep going"] * 10, + stops=[False] * 10, + ) + + scenario = SimulatedScenario( + scenario_id="s-maxturn", + scenario_description="desc", + actor_profile=actor_profile, + input="hello", + max_turns=3, + ) + + invoker_calls = [] + + def invoker(inp): + invoker_calls.append(inp) + return AgentInvokerOutput(agent_output="response") + + executor = SimulatedScenarioExecutor(agent_invoker=invoker) + result = executor.run_scenario(scenario) + + assert result.status == "COMPLETED" + assert mock_agent_instance.call_count == 3 + + def test_strands_not_installed_returns_failed(self, simulated_scenario): + _real_import = builtins.__import__ + + def _blocking_import(name, *args, **kwargs): + if name.startswith("strands"): + raise ImportError(f"No module named {name!r}") + return _real_import(name, *args, **kwargs) + + with patch.object(builtins, "__import__", side_effect=_blocking_import): + executor = SimulatedScenarioExecutor(agent_invoker=lambda inp: AgentInvokerOutput(agent_output="")) + result = executor.run_scenario(simulated_scenario) + + assert result.status == "FAILED" + assert "strands-agents-evals" in result.error + + def test_input_type_parses_input_dict(self, actor_profile, mock_strands): + """input dict is validated into input_type for the first agent call.""" + mock_agent_cls, mock_agent_instance, _ = mock_strands + mock_agent_instance.side_effect = _make_actor_side_effect(messages=[None], stops=[True]) + + scenario = SimulatedScenario( + scenario_id="s-typed", + scenario_description="desc", + actor_profile=actor_profile, + input={"item": "pizza", "quantity": 2}, + ) + + received = [] + + def invoker(inp): + received.append(inp.payload) + return AgentInvokerOutput(agent_output="ok") + + config = SimulationConfig(input_type=OrderRequest) + executor = SimulatedScenarioExecutor(agent_invoker=invoker, simulation_config=config) + executor.run_scenario(scenario) + + assert len(received) == 1 + 
assert isinstance(received[0], OrderRequest) + assert received[0].item == "pizza" + + def test_input_type_message_passed_to_agent_as_typed_instance(self, actor_profile, mock_strands): + """Actor's input_type message is passed directly to the invoker as a typed instance.""" + mock_agent_cls, mock_agent_instance, _ = mock_strands + burger_order = OrderRequest(item="burger", quantity=1) + mock_agent_instance.side_effect = _make_actor_side_effect( + messages=[burger_order, None], + stops=[False, True], + ) + + scenario = SimulatedScenario( + scenario_id="s-typed-msg", + scenario_description="desc", + actor_profile=actor_profile, + input={"item": "pizza", "quantity": 2}, + ) + + received = [] + + def invoker(inp): + received.append(inp.payload) + return AgentInvokerOutput(agent_output="ok") + + config = SimulationConfig(input_type=OrderRequest) + executor = SimulatedScenarioExecutor(agent_invoker=invoker, simulation_config=config) + executor.run_scenario(scenario) + + # Turn 1: input → OrderRequest + # Turn 2: actor's typed message → OrderRequest (same type, consistent) + assert len(received) == 2 + assert isinstance(received[0], OrderRequest) + assert isinstance(received[1], OrderRequest) + assert received[1].item == "burger" + + def test_null_message_on_continue_treated_as_stop(self, actor_profile, mock_strands): + """If actor returns stop=False but message=None with input_type set, treat as goal_completed.""" + mock_agent_cls, mock_agent_instance, _ = mock_strands + mock_agent_instance.side_effect = _make_actor_side_effect( + messages=[None], + stops=[False], # stop=False but message=None + ) + + scenario = SimulatedScenario( + scenario_id="s-null-msg", + scenario_description="desc", + actor_profile=actor_profile, + input={"item": "pizza", "quantity": 1}, + ) + + config = SimulationConfig(input_type=OrderRequest) + executor = SimulatedScenarioExecutor( + agent_invoker=lambda inp: AgentInvokerOutput(agent_output="ok"), + simulation_config=config, + ) + result = 
executor.run_scenario(scenario) + + assert result.status == "COMPLETED" + + def test_agent_output_serialized_for_actor(self, actor_profile, mock_strands): + """Agent output is serialized with _to_string and passed as-is to the actor.""" + mock_agent_cls, mock_agent_instance, _ = mock_strands + mock_agent_instance.side_effect = _make_actor_side_effect(messages=[None], stops=[True]) + + scenario = SimulatedScenario( + scenario_id="s-output-single", + scenario_description="desc", + actor_profile=actor_profile, + input="hello", + ) + + class AgentAnswer(BaseModel): + answer: str + + agent_output = AgentAnswer(answer="Here is the answer.") + + def invoker(inp): + return AgentInvokerOutput(agent_output=agent_output) + + config = SimulationConfig(output_type=AgentAnswer) + executor = SimulatedScenarioExecutor(agent_invoker=invoker, simulation_config=config) + executor.run_scenario(scenario) + + mock_agent_instance.assert_called_once() + actual_arg = mock_agent_instance.call_args[0][0] + # Full serialized output passed to actor unchanged + assert json.loads(actual_arg) == {"answer": "Here is the answer."} + + def test_agent_output_multi_field_schema_passes_json_to_actor(self, actor_profile, mock_strands): + """When output_type has multiple fields, actor receives the full JSON string.""" + mock_agent_cls, mock_agent_instance, _ = mock_strands + mock_agent_instance.side_effect = _make_actor_side_effect(messages=[None], stops=[True]) + + scenario = SimulatedScenario( + scenario_id="s-output-multi", + scenario_description="desc", + actor_profile=actor_profile, + input="hello", + ) + + agent_output = OrderConfirmation(order_id="ORD-99", status="confirmed") + + def invoker(inp): + return AgentInvokerOutput(agent_output=agent_output) + + config = SimulationConfig(output_type=OrderConfirmation) + executor = SimulatedScenarioExecutor(agent_invoker=invoker, simulation_config=config) + executor.run_scenario(scenario) + + mock_agent_instance.assert_called_once() + actual_arg = 
mock_agent_instance.call_args[0][0] + # Multi-field schema — full JSON passed so actor can use schema context + assert actual_arg == agent_output.model_dump_json() + + def test_agent_plain_text_passed_through_when_output_type_parse_fails(self, actor_profile, mock_strands): + """When agent returns plain text that can't be parsed as output_type, pass it through.""" + mock_agent_cls, mock_agent_instance, _ = mock_strands + mock_agent_instance.side_effect = _make_actor_side_effect(messages=[None], stops=[True]) + + scenario = SimulatedScenario( + scenario_id="s-output-fallback", + scenario_description="desc", + actor_profile=actor_profile, + input="hello", + ) + + def invoker(inp): + return AgentInvokerOutput(agent_output="Here is a plain text response.") + + config = SimulationConfig(output_type=OrderConfirmation) + executor = SimulatedScenarioExecutor(agent_invoker=invoker, simulation_config=config) + executor.run_scenario(scenario) + + mock_agent_instance.assert_called_once() + actual_arg = mock_agent_instance.call_args[0][0] + assert actual_arg == "Here is a plain text response." 
+ + def test_actor_profile_mapped_to_strands_profile(self, simulated_scenario, mock_strands): + mock_agent_cls, mock_agent_instance, mock_profile_cls = mock_strands + mock_agent_instance.side_effect = _make_actor_side_effect(messages=[None], stops=[True]) + + executor = SimulatedScenarioExecutor(agent_invoker=lambda inp: AgentInvokerOutput(agent_output="ok")) + executor.run_scenario(simulated_scenario) + + mock_profile_cls.assert_called_once_with( + traits=simulated_scenario.actor_profile.traits, + context=simulated_scenario.actor_profile.context, + actor_goal=simulated_scenario.actor_profile.goal, + ) diff --git a/tests/bedrock_agentcore/evaluation/test_dataset_parser.py b/tests/bedrock_agentcore/evaluation/test_dataset_parser.py index a383a17b..da605375 100644 --- a/tests/bedrock_agentcore/evaluation/test_dataset_parser.py +++ b/tests/bedrock_agentcore/evaluation/test_dataset_parser.py @@ -6,8 +6,10 @@ from bedrock_agentcore.evaluation.runner.dataset_providers import FileDatasetProvider from bedrock_agentcore.evaluation.runner.dataset_types import ( + ActorProfile, Dataset, PredefinedScenario, + SimulatedScenario, ) @@ -74,7 +76,7 @@ def test_parse_empty_scenarios(self, tmp_path): def test_parse_missing_required_field(self, tmp_path): data = {"scenarios": [{"scenario_id": "s1"}]} - with pytest.raises(KeyError): + with pytest.raises(ValueError): self._write_and_load(tmp_path, data) def test_file_not_found(self): @@ -89,3 +91,79 @@ def test_invalid_json(self, tmp_path): provider = FileDatasetProvider(str(file_path)) with pytest.raises(json.JSONDecodeError): provider.get_dataset() + + def test_parse_simulated(self, tmp_path): + data = { + "scenarios": [ + { + "scenario_id": "sim-1", + "scenario_description": "Customer orders pizza", + "actor_profile": { + "traits": {"expertise": "novice"}, + "context": "A hungry customer", + "goal": "Order a pizza successfully", + }, + "input": "I'd like to order a pizza", + "max_turns": 5, + } + ] + } + dataset = 
self._write_and_load(tmp_path, data) + assert len(dataset.scenarios) == 1 + scenario = dataset.scenarios[0] + assert isinstance(scenario, SimulatedScenario) + assert scenario.scenario_id == "sim-1" + assert scenario.scenario_description == "Customer orders pizza" + assert isinstance(scenario.actor_profile, ActorProfile) + assert scenario.actor_profile.goal == "Order a pizza successfully" + assert scenario.input == "I'd like to order a pizza" + assert scenario.max_turns == 5 + + def test_parse_simulated_default_max_turns(self, tmp_path): + data = { + "scenarios": [ + { + "scenario_id": "sim-2", + "scenario_description": "desc", + "actor_profile": {"context": "ctx", "goal": "goal"}, + "input": "hello", + } + ] + } + dataset = self._write_and_load(tmp_path, data) + assert dataset.scenarios[0].max_turns == 10 + + def test_parse_simulated_with_assertions(self, tmp_path): + data = { + "scenarios": [ + { + "scenario_id": "sim-3", + "scenario_description": "desc", + "actor_profile": {"context": "ctx", "goal": "goal"}, + "input": "hello", + "assertions": ["Must greet user", "Must confirm order"], + } + ] + } + dataset = self._write_and_load(tmp_path, data) + assert dataset.scenarios[0].assertions == ["Must greet user", "Must confirm order"] + + def test_parse_mixed_predefined_and_simulated(self, tmp_path): + data = { + "scenarios": [ + { + "scenario_id": "pre-1", + "turns": [{"input": "hello"}], + }, + { + "scenario_id": "sim-1", + "scenario_description": "desc", + "actor_profile": {"context": "ctx", "goal": "goal"}, + "input": "hello", + }, + ] + } + dataset = self._write_and_load(tmp_path, data) + assert len(dataset.scenarios) == 2 + assert isinstance(dataset.scenarios[0], PredefinedScenario) + assert isinstance(dataset.scenarios[1], SimulatedScenario) diff --git a/tests/bedrock_agentcore/runtime/test_app.py b/tests/bedrock_agentcore/runtime/test_app.py index bfc756a0..5f5d5f35 100644 --- a/tests/bedrock_agentcore/runtime/test_app.py +++ 
b/tests/bedrock_agentcore/runtime/test_app.py @@ -11,7 +11,11 @@ import pytest from starlette.testclient import TestClient +import bedrock_agentcore.runtime.app as _app_module +from bedrock_agentcore.config_bundle.bundle import ConfigBundleRef from bedrock_agentcore.runtime import BedrockAgentCoreApp +from bedrock_agentcore.runtime.app import _parse_runtime_arn +from bedrock_agentcore.runtime.context import BedrockAgentCoreContext class TestBedrockAgentCoreApp: @@ -2750,3 +2754,217 @@ def otel_simulated_target(): # Verify the loop can actually execute work future = asyncio.run_coroutine_threadsafe(asyncio.sleep(0, result="otel_ok"), app._worker_loop) assert future.result(timeout=5) == "otel_ok" + + +class TestConfigBundleBaggageParsing: + """Tests that the app parses OTEL baggage and populates config bundle refs.""" + + ARN = "arn:aws:bedrock-agentcore:us-west-2:123456789012:bundle/my-agent" + + def test_baggage_header_populates_bundle_ref(self): + captured_refs = [] + + app = BedrockAgentCoreApp() + + @app.entrypoint + def handler(payload): + ref = BedrockAgentCoreContext.get_config_bundle_ref() + if ref is not None: + captured_refs.append(ref) + return {} + + client = TestClient(app) + client.post( + "/invocations", + json={}, + headers={"baggage": f"aws.agentcore.configbundle_arn={self.ARN},aws.agentcore.configbundle_version=2"}, + ) + + assert len(captured_refs) == 1 + assert captured_refs[0] == ConfigBundleRef(bundle_arn=self.ARN, bundle_version="2") + + def test_no_baggage_header_leaves_ref_none(self): + captured = [] + + app = BedrockAgentCoreApp() + + @app.entrypoint + def handler(payload): + captured.append(BedrockAgentCoreContext.get_config_bundle_ref()) + return {} + + client = TestClient(app) + client.post("/invocations", json={}) + + assert captured == [None] + + def test_custom_config_client_is_used(self, monkeypatch): + runtime_arn = "arn:aws:bedrock-agentcore:us-west-2:123456789012:runtime/MyRuntime" + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", 
f"cloud.resource_id={runtime_arn}") + + mock_client = MagicMock() + mock_client.get_configuration_bundle_version.return_value = { + "components": {runtime_arn: {"configuration": {"model_id": "claude-3"}}} + } + + app = BedrockAgentCoreApp() + + @app.entrypoint + def handler(payload): + return BedrockAgentCoreContext.get_config_bundle() + + with patch.object(app, "_config_client", mock_client): + client = TestClient(app) + response = client.post( + "/invocations", + json={}, + headers={"baggage": f"aws.agentcore.configbundle_arn={self.ARN},aws.agentcore.configbundle_version=2"}, + ) + + assert response.status_code == 200 + mock_client.get_configuration_bundle_version.assert_called_once_with(bundleId="my-agent", versionId="2") + + +# --------------------------------------------------------------------------- +# Constants shared by TestParseRuntimeArn and TestResolveBundleConfig +# --------------------------------------------------------------------------- +RUNTIME_ARN = "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/MyRuntime-a1b2c3d4" +ENDPOINT_ARN = f"{RUNTIME_ARN}/runtime-endpoint/DEFAULT" +ENDPOINT_ARN_WITH_QUALIFIER = f"{ENDPOINT_ARN}:DEFAULT" +BUNDLE_ARN = "arn:aws:bedrock-agentcore:us-east-1:123456789012:bundle/my-bundle" +COMPONENTS = {RUNTIME_ARN: {"configuration": {"model_id": "claude-3-5-sonnet", "temperature": 0.7}}} + + +@pytest.fixture(autouse=False) +def reset_arn_cache(monkeypatch): + """Reset the module-level _runtime_arn_cache sentinel before each test that requests it.""" + monkeypatch.setattr(_app_module, "_runtime_arn_cache", _app_module._UNRESOLVED) + + +class TestParseRuntimeArn: + """Unit tests for the _parse_runtime_arn() helper in app.py.""" + + def test_returns_none_when_env_var_absent(self, monkeypatch, reset_arn_cache): + monkeypatch.delenv("OTEL_RESOURCE_ATTRIBUTES", raising=False) + assert _parse_runtime_arn() is None + + def test_returns_none_when_cloud_resource_id_missing(self, monkeypatch, reset_arn_cache): + 
monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", "service.name=my-agent,deployment.environment=prod") + assert _parse_runtime_arn() is None + + def test_returns_plain_runtime_arn(self, monkeypatch, reset_arn_cache): + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", f"cloud.resource_id={RUNTIME_ARN}") + assert _parse_runtime_arn() == RUNTIME_ARN + + def test_normalises_endpoint_arn_to_runtime_arn(self, monkeypatch, reset_arn_cache): + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", f"cloud.resource_id={ENDPOINT_ARN}") + assert _parse_runtime_arn() == RUNTIME_ARN + + def test_normalises_endpoint_arn_with_qualifier(self, monkeypatch, reset_arn_cache): + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", f"cloud.resource_id={ENDPOINT_ARN_WITH_QUALIFIER}") + assert _parse_runtime_arn() == RUNTIME_ARN + + def test_ignores_other_attributes_before_cloud_resource_id(self, monkeypatch, reset_arn_cache): + otel = f"service.name=agent, deployment.environment=prod, cloud.resource_id={RUNTIME_ARN}" + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", otel) + assert _parse_runtime_arn() == RUNTIME_ARN + + def test_result_is_cached_across_calls(self, monkeypatch, reset_arn_cache): + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", f"cloud.resource_id={RUNTIME_ARN}") + first = _parse_runtime_arn() + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", "service.name=other") + second = _parse_runtime_arn() + assert first == second == RUNTIME_ARN + + def test_none_result_is_also_cached(self, monkeypatch, reset_arn_cache): + monkeypatch.delenv("OTEL_RESOURCE_ATTRIBUTES", raising=False) + first = _parse_runtime_arn() + assert first is None + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", f"cloud.resource_id={RUNTIME_ARN}") + second = _parse_runtime_arn() + assert second is None + + +class TestResolveBundleConfig: + """Unit tests for BedrockAgentCoreApp._resolve_bundle_config().""" + + def _make_app_with_mock_client(self, components=None): + app = BedrockAgentCoreApp() + mock_client = MagicMock() + 
mock_client.get_configuration_bundle_version.return_value = {"components": components or {}} + app._config_client = mock_client + return app, mock_client + + def test_returns_empty_when_otel_env_absent(self, monkeypatch, reset_arn_cache): + monkeypatch.delenv("OTEL_RESOURCE_ATTRIBUTES", raising=False) + app, _ = self._make_app_with_mock_client(COMPONENTS) + ref = ConfigBundleRef(bundle_arn=BUNDLE_ARN, bundle_version="1") + + result = app._resolve_bundle_config(ref) + assert result == {} + + def test_returns_empty_when_runtime_arn_not_in_components(self, monkeypatch, reset_arn_cache): + other_arn = "arn:aws:bedrock-agentcore:us-east-1:123456789012:runtime/OtherRuntime" + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", f"cloud.resource_id={other_arn}") + app, _ = self._make_app_with_mock_client(COMPONENTS) + ref = ConfigBundleRef(bundle_arn=BUNDLE_ARN, bundle_version="1") + + result = app._resolve_bundle_config(ref) + assert result == {} + + def test_returns_configuration_for_matching_runtime_arn(self, monkeypatch, reset_arn_cache): + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", f"cloud.resource_id={RUNTIME_ARN}") + app, _ = self._make_app_with_mock_client(COMPONENTS) + ref = ConfigBundleRef(bundle_arn=BUNDLE_ARN, bundle_version="1") + + result = app._resolve_bundle_config(ref) + assert result == {"model_id": "claude-3-5-sonnet", "temperature": 0.7} + + def test_normalises_endpoint_arn_for_component_lookup(self, monkeypatch, reset_arn_cache): + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", f"cloud.resource_id={ENDPOINT_ARN_WITH_QUALIFIER}") + app, _ = self._make_app_with_mock_client(COMPONENTS) + ref = ConfigBundleRef(bundle_arn=BUNDLE_ARN, bundle_version="1") + + result = app._resolve_bundle_config(ref) + assert result == {"model_id": "claude-3-5-sonnet", "temperature": 0.7} + + def test_returns_empty_dict_when_configuration_key_missing(self, monkeypatch, reset_arn_cache): + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", f"cloud.resource_id={RUNTIME_ARN}") + app, 
_ = self._make_app_with_mock_client({RUNTIME_ARN: {}}) + ref = ConfigBundleRef(bundle_arn=BUNDLE_ARN, bundle_version="1") + + result = app._resolve_bundle_config(ref) + assert result == {} + + def test_raises_on_api_error(self, monkeypatch, reset_arn_cache): + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", f"cloud.resource_id={RUNTIME_ARN}") + app = BedrockAgentCoreApp() + mock_client = MagicMock() + mock_client.get_configuration_bundle_version.side_effect = RuntimeError("API error") + app._config_client = mock_client + ref = ConfigBundleRef(bundle_arn=BUNDLE_ARN, bundle_version="1") + + with pytest.raises(RuntimeError, match="API error"): + app._resolve_bundle_config(ref) + + def test_same_ref_fetched_only_once_across_calls(self, monkeypatch, reset_arn_cache): + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", f"cloud.resource_id={RUNTIME_ARN}") + app, mock_client = self._make_app_with_mock_client(COMPONENTS) + ref = ConfigBundleRef(bundle_arn=BUNDLE_ARN, bundle_version="1") + + result1 = app._resolve_bundle_config(ref) + result2 = app._resolve_bundle_config(ref) + + assert result1 == result2 == {"model_id": "claude-3-5-sonnet", "temperature": 0.7} + mock_client.get_configuration_bundle_version.assert_called_once() + + def test_different_versions_fetched_independently(self, monkeypatch, reset_arn_cache): + monkeypatch.setenv("OTEL_RESOURCE_ATTRIBUTES", f"cloud.resource_id={RUNTIME_ARN}") + app, mock_client = self._make_app_with_mock_client(COMPONENTS) + ref_v1 = ConfigBundleRef(bundle_arn=BUNDLE_ARN, bundle_version="1") + ref_v2 = ConfigBundleRef(bundle_arn=BUNDLE_ARN, bundle_version="2") + + app._resolve_bundle_config(ref_v1) + app._resolve_bundle_config(ref_v2) + + assert mock_client.get_configuration_bundle_version.call_count == 2 diff --git a/tests/bedrock_agentcore/runtime/test_context.py b/tests/bedrock_agentcore/runtime/test_context.py index f3179e9a..db37bed8 100644 --- a/tests/bedrock_agentcore/runtime/test_context.py +++ 
b/tests/bedrock_agentcore/runtime/test_context.py @@ -3,8 +3,11 @@ import contextvars from unittest.mock import MagicMock +from bedrock_agentcore.config_bundle.bundle import ConfigBundleRef from bedrock_agentcore.runtime.context import BedrockAgentCoreContext, RequestContext +ARN = "arn:aws:bedrock-agentcore:us-west-2:123456789012:bundle/my-agent" + class TestBedrockAgentCoreContext: """Test BedrockAgentCoreContext functionality.""" @@ -249,3 +252,84 @@ def test_request_context_allows_arbitrary_types(self): context = RequestContext(request=mock_request) assert context.request is mock_request + + +class TestBedrockAgentCoreContextConfigBundles: + """Tests for config bundle ContextVar methods on BedrockAgentCoreContext.""" + + def _fresh_context(self): + return contextvars.Context() + + def test_get_config_bundle_ref_returns_none_when_unset(self): + result = self._fresh_context().run(BedrockAgentCoreContext.get_config_bundle_ref) + assert result is None + + def test_set_and_get_config_bundle_ref(self): + ref = ConfigBundleRef(bundle_arn=ARN, bundle_version="2") + + def run(): + BedrockAgentCoreContext.set_config_bundle_ref(ref) + return BedrockAgentCoreContext.get_config_bundle_ref() + + result = self._fresh_context().run(run) + assert result == ref + + def test_config_bundle_ref_isolated_between_contexts(self): + ref1 = ConfigBundleRef(bundle_arn=ARN, bundle_version="1") + ref2 = ConfigBundleRef(bundle_arn=ARN, bundle_version="2") + + def run_ctx1(): + BedrockAgentCoreContext.set_config_bundle_ref(ref1) + return BedrockAgentCoreContext.get_config_bundle_ref() + + def run_ctx2(): + BedrockAgentCoreContext.set_config_bundle_ref(ref2) + return BedrockAgentCoreContext.get_config_bundle_ref() + + result1 = self._fresh_context().run(run_ctx1) + result2 = self._fresh_context().run(run_ctx2) + + assert result1 == ref1 + assert result2 == ref2 + + def test_get_config_returns_empty_when_no_loader(self): + result = 
self._fresh_context().run(BedrockAgentCoreContext.get_config_bundle) + assert result == {} + + def test_get_config_calls_fetcher(self): + config = {"model_id": "claude-3"} + fetcher = MagicMock(return_value=config) + + def run(): + BedrockAgentCoreContext._set_bundle_loader(fetcher) + return BedrockAgentCoreContext.get_config_bundle() + + result = self._fresh_context().run(run) + + assert result == config + fetcher.assert_called_once() + + def test_get_config_calls_fetcher_each_request(self): + # The context layer calls the fetcher directly — caching is the fetcher's + # responsibility (lru_cache on app._resolve_bundle_config in production). + config = {"model_id": "claude-3"} + fetcher = MagicMock(return_value=config) + + def run(): + BedrockAgentCoreContext._set_bundle_loader(fetcher) + return BedrockAgentCoreContext.get_config_bundle() + + result1 = self._fresh_context().run(run) + result2 = self._fresh_context().run(run) + + assert result1 == config + assert result2 == config + assert fetcher.call_count == 2 # each request calls the fetcher independently + + def test_clear_bundle_loader_returns_empty(self): + def run(): + BedrockAgentCoreContext._clear_bundle_loader() + return BedrockAgentCoreContext.get_config_bundle() + + result = self._fresh_context().run(run) + assert result == {} diff --git a/tests/bedrock_agentcore/runtime/test_tracing.py b/tests/bedrock_agentcore/runtime/test_tracing.py new file mode 100644 index 00000000..020b4595 --- /dev/null +++ b/tests/bedrock_agentcore/runtime/test_tracing.py @@ -0,0 +1,521 @@ +"""Tests for BaggageSpanProcessor and auto-registration helper.""" + +import threading +from unittest.mock import MagicMock, patch + +import pytest + +from bedrock_agentcore.runtime.tracing import BaggageSpanProcessor, _ensure_baggage_processor_registered + +# --------------------------------------------------------------------------- +# BaggageSpanProcessor.on_start +# 
--------------------------------------------------------------------------- + + +class TestBaggageSpanProcessorOnStart: + def setup_method(self): + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + BedrockAgentCoreContext.set_routing_experiment(None, None) + + def _make_span(self): + span = MagicMock() + return span + + def test_sets_both_attributes_when_both_present(self): + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + BedrockAgentCoreContext.set_routing_experiment("arn:aws:bedrock:us-east-1:123:exp/e1", "blue") + span = self._make_span() + BaggageSpanProcessor().on_start(span) + + calls = {c[0][0]: c[0][1] for c in span.set_attribute.call_args_list} + assert calls["aws.agentcore.gateway.routing_experiment_arn"] == "arn:aws:bedrock:us-east-1:123:exp/e1" + assert calls["aws.agentcore.gateway.routing_experiment_variant_name"] == "blue" + + def test_sets_only_arn_when_variant_absent(self): + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + BedrockAgentCoreContext.set_routing_experiment("arn:aws:bedrock:us-east-1:123:exp/e1", None) + span = self._make_span() + BaggageSpanProcessor().on_start(span) + + calls = {c[0][0] for c in span.set_attribute.call_args_list} + assert "aws.agentcore.gateway.routing_experiment_arn" in calls + assert "aws.agentcore.gateway.routing_experiment_variant_name" not in calls + + def test_sets_only_variant_when_arn_absent(self): + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + BedrockAgentCoreContext.set_routing_experiment(None, "green") + span = self._make_span() + BaggageSpanProcessor().on_start(span) + + calls = {c[0][0] for c in span.set_attribute.call_args_list} + assert "aws.agentcore.gateway.routing_experiment_variant_name" in calls + assert "aws.agentcore.gateway.routing_experiment_arn" not in calls + + def test_sets_no_attributes_when_both_absent(self): + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + 
BedrockAgentCoreContext.set_routing_experiment(None, None) + span = self._make_span() + BaggageSpanProcessor().on_start(span) + + span.set_attribute.assert_not_called() + + def test_falls_back_to_parent_context_baggage_when_contextvars_empty(self): + """ASGI entry span: ContextVars not yet set, baggage in parent_context.""" + from opentelemetry import baggage as otel_baggage + from opentelemetry import context as otel_context + + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + BedrockAgentCoreContext.set_routing_experiment(None, None) + + parent_ctx = otel_baggage.set_baggage( + "aws.agentcore.gateway.routing_experiment_arn", + "arn:exp-fallback", + otel_baggage.set_baggage( + "aws.agentcore.gateway.routing_experiment_variant_name", + "green", + otel_context.get_current(), + ), + ) + + span = self._make_span() + BaggageSpanProcessor().on_start(span, parent_context=parent_ctx) + + calls = {c[0][0]: c[0][1] for c in span.set_attribute.call_args_list} + assert calls["aws.agentcore.gateway.routing_experiment_arn"] == "arn:exp-fallback" + assert calls["aws.agentcore.gateway.routing_experiment_variant_name"] == "green" + + def test_contextvar_takes_priority_over_parent_context_baggage(self): + """ContextVar value wins for both fields when both sources are set.""" + from opentelemetry import baggage as otel_baggage + from opentelemetry import context as otel_context + + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + BedrockAgentCoreContext.set_routing_experiment("arn:exp-ctx", "blue") + + parent_ctx = otel_baggage.set_baggage( + "aws.agentcore.gateway.routing_experiment_arn", + "arn:exp-baggage", + otel_baggage.set_baggage( + "aws.agentcore.gateway.routing_experiment_variant_name", + "green", + otel_context.get_current(), + ), + ) + + span = self._make_span() + BaggageSpanProcessor().on_start(span, parent_context=parent_ctx) + + calls = {c[0][0]: c[0][1] for c in span.set_attribute.call_args_list} + assert 
calls["aws.agentcore.gateway.routing_experiment_arn"] == "arn:exp-ctx" + assert calls["aws.agentcore.gateway.routing_experiment_variant_name"] == "blue" + + def test_parent_context_fallback_skipped_when_opentelemetry_not_installed(self): + """When opentelemetry-api is absent, parent_context baggage fallback is silently skipped.""" + import sys + + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + BedrockAgentCoreContext.set_routing_experiment(None, None) + span = self._make_span() + + saved = {k: v for k, v in sys.modules.items() if k == "opentelemetry" or k.startswith("opentelemetry.")} + for key in saved: + del sys.modules[key] + try: + # Must not raise; span gets no attributes (no ContextVar, no baggage fallback). + BaggageSpanProcessor().on_start(span, parent_context=MagicMock()) + finally: + sys.modules.update(saved) + + span.set_attribute.assert_not_called() + + def test_different_contexts_get_different_values(self): + """Concurrent requests must not bleed experiment values into each other.""" + import contextvars + + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + processor = BaggageSpanProcessor() + results = {} + + def run_in_context(name, arn, variant): + ctx = contextvars.copy_context() + + def _inner(): + BedrockAgentCoreContext.set_routing_experiment(arn, variant) + span = MagicMock() + processor.on_start(span) + results[name] = {c[0][0]: c[0][1] for c in span.set_attribute.call_args_list} + + ctx.run(_inner) + + t1 = threading.Thread(target=run_in_context, args=("req1", "arn:exp-A", "blue")) + t2 = threading.Thread(target=run_in_context, args=("req2", "arn:exp-B", "green")) + t1.start() + t2.start() + t1.join() + t2.join() + + assert results["req1"]["aws.agentcore.gateway.routing_experiment_arn"] == "arn:exp-A" + assert results["req1"]["aws.agentcore.gateway.routing_experiment_variant_name"] == "blue" + assert results["req2"]["aws.agentcore.gateway.routing_experiment_arn"] == "arn:exp-B" + assert 
results["req2"]["aws.agentcore.gateway.routing_experiment_variant_name"] == "green" + + +class TestBaggageSpanProcessorNoOpMethods: + def test_on_end_does_not_raise(self): + BaggageSpanProcessor().on_end(MagicMock()) + + def test_shutdown_does_not_raise(self): + BaggageSpanProcessor().shutdown() + + def test_force_flush_returns_true(self): + assert BaggageSpanProcessor().force_flush() is True + + +# --------------------------------------------------------------------------- +# _ensure_baggage_processor_registered +# --------------------------------------------------------------------------- + + +class TestEnsureBaggageProcessorRegistered: + def setup_method(self): + # Reset module-level state before each test. + import bedrock_agentcore.runtime.tracing as tracing_mod + + tracing_mod._registered_on = None + + def test_registers_on_first_call(self): + mock_provider = MagicMock() + + with patch("opentelemetry.trace.get_tracer_provider", return_value=mock_provider): + _ensure_baggage_processor_registered() + + mock_provider.add_span_processor.assert_called_once() + args = mock_provider.add_span_processor.call_args[0] + assert isinstance(args[0], BaggageSpanProcessor) + + def test_skips_registration_on_same_provider(self): + mock_provider = MagicMock() + + with patch("opentelemetry.trace.get_tracer_provider", return_value=mock_provider): + _ensure_baggage_processor_registered() + _ensure_baggage_processor_registered() + + assert mock_provider.add_span_processor.call_count == 1 + + def test_re_registers_when_provider_replaced(self): + provider_a = MagicMock() + provider_b = MagicMock() + + with patch("opentelemetry.trace.get_tracer_provider", return_value=provider_a): + _ensure_baggage_processor_registered() + + with patch("opentelemetry.trace.get_tracer_provider", return_value=provider_b): + _ensure_baggage_processor_registered() + + provider_a.add_span_processor.assert_called_once() + provider_b.add_span_processor.assert_called_once() + + def 
test_noop_when_opentelemetry_not_installed(self): + import sys + + saved = {k: v for k, v in sys.modules.items() if k == "opentelemetry" or k.startswith("opentelemetry.")} + for key in saved: + del sys.modules[key] + try: + _ensure_baggage_processor_registered() # must not raise + finally: + sys.modules.update(saved) + + def test_thread_safe_registration(self): + """Only one add_span_processor call even under concurrent first requests.""" + mock_provider = MagicMock() + barrier = threading.Barrier(10) + + def _call(): + barrier.wait() + with patch("opentelemetry.trace.get_tracer_provider", return_value=mock_provider): + _ensure_baggage_processor_registered() + + threads = [threading.Thread(target=_call) for _ in range(10)] + for t in threads: + t.start() + for t in threads: + t.join() + + assert mock_provider.add_span_processor.call_count == 1 + + +# --------------------------------------------------------------------------- +# End-to-end: baggage header → ContextVar → span attribute (via app.py) +# --------------------------------------------------------------------------- + + +class TestBaggageEndToEnd: + def test_baggage_processor_registered_on_provider(self): + """BaggageSpanProcessor is registered on the TracerProvider when app is created.""" + import bedrock_agentcore.runtime.tracing as tracing_mod + from bedrock_agentcore.runtime import BedrockAgentCoreApp + + tracing_mod._registered_on = None + mock_provider = MagicMock() + + with patch("opentelemetry.trace.get_tracer_provider", return_value=mock_provider): + BedrockAgentCoreApp() + + mock_provider.add_span_processor.assert_called_once() + processor = mock_provider.add_span_processor.call_args[0][0] + assert isinstance(processor, BaggageSpanProcessor) + + def test_contextvars_populated_during_handler(self): + """Baggage header → ContextVars are set by the time the handler runs.""" + from starlette.testclient import TestClient + + from bedrock_agentcore.runtime import BedrockAgentCoreApp + from 
bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + app = BedrockAgentCoreApp() + captured = {} + + @app.entrypoint + def handler(payload): + captured["arn"] = BedrockAgentCoreContext.get_routing_experiment_arn() + captured["variant"] = BedrockAgentCoreContext.get_routing_experiment_variant() + return {"ok": True} + + client = TestClient(app) + baggage = ( + "aws.agentcore.gateway.routing_experiment_arn=arn:aws:bedrock:us-east-1:123:exp/e1," + "aws.agentcore.gateway.routing_experiment_variant_name=canary" + ) + response = client.post("/invocations", json={}, headers={"baggage": baggage}) + + assert response.status_code == 200 + assert captured["arn"] == "arn:aws:bedrock:us-east-1:123:exp/e1" + assert captured["variant"] == "canary" + + def test_no_baggage_clears_experiment_context(self): + """Request without baggage sets both experiment ContextVars to None.""" + from starlette.testclient import TestClient + + from bedrock_agentcore.runtime import BedrockAgentCoreApp + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + app = BedrockAgentCoreApp() + captured = {} + + @app.entrypoint + def handler(payload): + captured["arn"] = BedrockAgentCoreContext.get_routing_experiment_arn() + captured["variant"] = BedrockAgentCoreContext.get_routing_experiment_variant() + return {"ok": True} + + client = TestClient(app) + response = client.post("/invocations", json={}) + + assert response.status_code == 200 + assert captured["arn"] is None + assert captured["variant"] is None + + def test_extract_baggage_error_clears_experiment_context(self): + """When _extract_baggage raises, all_baggage defaults to {} and ContextVars are set to None.""" + from starlette.testclient import TestClient + + from bedrock_agentcore.runtime import BedrockAgentCoreApp + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + app = BedrockAgentCoreApp() + captured = {} + + @app.entrypoint + def handler(payload): + captured["arn"] = 
BedrockAgentCoreContext.get_routing_experiment_arn() + captured["variant"] = BedrockAgentCoreContext.get_routing_experiment_variant() + return {"ok": True} + + with patch( + "bedrock_agentcore.runtime.app._extract_baggage", + side_effect=ValueError("malformed"), + ): + client = TestClient(app) + response = client.post("/invocations", json={}, headers={"baggage": "bad=data"}) + + assert response.status_code == 200 + assert captured["arn"] is None + assert captured["variant"] is None + + +# --------------------------------------------------------------------------- +# A2A: BedrockCallContextBuilder experiment baggage extraction +# --------------------------------------------------------------------------- + + +class TestA2ACallContextBuilderBaggage: + """BedrockCallContextBuilder.build() sets experiment ContextVars from W3C baggage.""" + + def setup_method(self): + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + BedrockAgentCoreContext.set_routing_experiment(None, None) + + def _make_request(self, headers: dict): + mock_request = MagicMock() + mock_request.headers = headers + return mock_request + + def test_baggage_processor_registered_on_provider(self): + """BaggageSpanProcessor is registered on the TracerProvider when BedrockCallContextBuilder is created.""" + import bedrock_agentcore.runtime.tracing as tracing_mod + + tracing_mod._registered_on = None + mock_provider = MagicMock() + + with patch("opentelemetry.trace.get_tracer_provider", return_value=mock_provider): + from bedrock_agentcore.runtime.a2a import BedrockCallContextBuilder + + BedrockCallContextBuilder() + + mock_provider.add_span_processor.assert_called_once() + assert isinstance(mock_provider.add_span_processor.call_args[0][0], BaggageSpanProcessor) + + def test_baggage_sets_experiment_context(self): + """Baggage header → experiment ContextVars are set by the time build() returns.""" + from bedrock_agentcore.runtime.a2a import BedrockCallContextBuilder + from 
bedrock_agentcore.runtime.context import BedrockAgentCoreContext + from bedrock_agentcore.runtime.models import REQUEST_ID_HEADER, SESSION_HEADER + + headers = { + "baggage": ( + "aws.agentcore.gateway.routing_experiment_arn=arn:aws:bedrock:us-east-1:123:exp/e1," + "aws.agentcore.gateway.routing_experiment_variant_name=blue" + ), + REQUEST_ID_HEADER: "req-123", + SESSION_HEADER: "sess-456", + } + builder = BedrockCallContextBuilder() + ctx = builder.build(self._make_request(headers)) + + assert BedrockAgentCoreContext.get_routing_experiment_arn() == "arn:aws:bedrock:us-east-1:123:exp/e1" + assert BedrockAgentCoreContext.get_routing_experiment_variant() == "blue" + # A2A protocol contract: request_id and session_id in ServerCallContext.state + assert ctx.state["request_id"] == "req-123" + assert ctx.state["session_id"] == "sess-456" + + def test_no_baggage_clears_experiment_context(self): + """Request without baggage sets both experiment ContextVars to None.""" + from bedrock_agentcore.runtime.a2a import BedrockCallContextBuilder + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + builder = BedrockCallContextBuilder() + builder.build(self._make_request({})) + + assert BedrockAgentCoreContext.get_routing_experiment_arn() is None + assert BedrockAgentCoreContext.get_routing_experiment_variant() is None + + def test_extract_baggage_error_clears_experiment_context(self): + """When _extract_baggage raises, all_baggage defaults to {} and ContextVars are set to None.""" + from bedrock_agentcore.runtime.a2a import BedrockCallContextBuilder + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + builder = BedrockCallContextBuilder() + with patch( + "bedrock_agentcore.runtime.a2a._extract_baggage", + side_effect=ValueError("malformed"), + ): + builder.build(self._make_request({"baggage": "bad=data"})) + + assert BedrockAgentCoreContext.get_routing_experiment_arn() is None + assert 
BedrockAgentCoreContext.get_routing_experiment_variant() is None + + +# --------------------------------------------------------------------------- +# AG-UI: AGUIApp._build_request_context experiment baggage extraction +# --------------------------------------------------------------------------- + + +class TestAGUIBaggageExtraction: + """AGUIApp._build_request_context() sets experiment ContextVars from W3C baggage.""" + + def setup_method(self): + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + BedrockAgentCoreContext.set_routing_experiment(None, None) + + def _make_request(self, headers: dict): + mock_request = MagicMock() + mock_request.headers = headers + return mock_request + + def test_baggage_processor_registered_on_provider(self): + """BaggageSpanProcessor is registered on the TracerProvider when AGUIApp is created.""" + pytest.importorskip("ag_ui") + + import bedrock_agentcore.runtime.tracing as tracing_mod + + tracing_mod._registered_on = None + mock_provider = MagicMock() + + with patch("opentelemetry.trace.get_tracer_provider", return_value=mock_provider): + from bedrock_agentcore.runtime.ag_ui import AGUIApp + + AGUIApp() + + mock_provider.add_span_processor.assert_called_once() + assert isinstance(mock_provider.add_span_processor.call_args[0][0], BaggageSpanProcessor) + + def test_baggage_sets_experiment_context(self): + """Baggage header → experiment ContextVars are set by the time _build_request_context() returns.""" + pytest.importorskip("ag_ui") + + from bedrock_agentcore.runtime.ag_ui import AGUIApp + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + app = AGUIApp() + headers = { + "baggage": ( + "aws.agentcore.gateway.routing_experiment_arn=arn:aws:bedrock:us-east-1:123:exp/e2," + "aws.agentcore.gateway.routing_experiment_variant_name=green" + ) + } + app._build_request_context(self._make_request(headers)) + + assert BedrockAgentCoreContext.get_routing_experiment_arn() == 
"arn:aws:bedrock:us-east-1:123:exp/e2" + assert BedrockAgentCoreContext.get_routing_experiment_variant() == "green" + + def test_no_baggage_clears_experiment_context(self): + """Request without baggage sets both experiment ContextVars to None.""" + pytest.importorskip("ag_ui") + + from bedrock_agentcore.runtime.ag_ui import AGUIApp + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + app = AGUIApp() + app._build_request_context(self._make_request({})) + + assert BedrockAgentCoreContext.get_routing_experiment_arn() is None + assert BedrockAgentCoreContext.get_routing_experiment_variant() is None + + def test_extract_baggage_error_clears_experiment_context(self): + """When _extract_baggage raises, all_baggage defaults to {} and ContextVars are set to None.""" + pytest.importorskip("ag_ui") + + from bedrock_agentcore.runtime.ag_ui import AGUIApp + from bedrock_agentcore.runtime.context import BedrockAgentCoreContext + + app = AGUIApp() + with patch( + "bedrock_agentcore.runtime.ag_ui._extract_baggage", + side_effect=ValueError("malformed"), + ): + app._build_request_context(self._make_request({"baggage": "bad=data"})) + + assert BedrockAgentCoreContext.get_routing_experiment_arn() is None + assert BedrockAgentCoreContext.get_routing_experiment_variant() is None diff --git a/uv.lock b/uv.lock index f51dc923..3f6751bd 100644 --- a/uv.lock +++ b/uv.lock @@ -290,6 +290,10 @@ a2a = [ ag-ui = [ { name = "ag-ui-protocol" }, ] +simulation = [ + { name = "jinja2" }, + { name = "strands-agents-evals" }, +] strands-agents = [ { name = "strands-agents" }, ] @@ -321,18 +325,20 @@ dev = [ requires-dist = [ { name = "a2a-sdk", extras = ["http-server"], marker = "extra == 'a2a'", specifier = ">=0.3" }, { name = "ag-ui-protocol", marker = "extra == 'ag-ui'", specifier = ">=0.1.10" }, - { name = "boto3", specifier = ">=1.42.86" }, - { name = "botocore", specifier = ">=1.42.86" }, + { name = "boto3", specifier = ">=1.43.0" }, + { name = "botocore", specifier = 
">=1.43.0" }, + { name = "jinja2", marker = "extra == 'simulation'", specifier = ">=3.1.0" }, { name = "pydantic", specifier = ">=2.0.0,<2.41.3" }, { name = "starlette", specifier = ">=0.46.2" }, { name = "strands-agents", marker = "extra == 'strands-agents'", specifier = ">=1.1.0" }, + { name = "strands-agents-evals", marker = "extra == 'simulation'", specifier = ">=0.1.0" }, { name = "strands-agents-evals", marker = "extra == 'strands-agents-evals'", specifier = ">=0.1.0" }, { name = "typing-extensions", specifier = ">=4.13.2,<5.0.0" }, { name = "urllib3", specifier = ">=1.26.0" }, { name = "uvicorn", specifier = ">=0.34.2" }, { name = "websockets", specifier = ">=12.0" }, ] -provides-extras = ["a2a", "ag-ui", "strands-agents", "strands-agents-evals"] +provides-extras = ["a2a", "ag-ui", "strands-agents", "strands-agents-evals", "simulation"] [package.metadata.requires-dev] dev = [ @@ -356,30 +362,30 @@ dev = [ [[package]] name = "boto3" -version = "1.42.86" +version = "1.43.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore" }, { name = "jmespath" }, { name = "s3transfer" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9d/4f/62b22f38db5a8e35db1fbd7b8ee31e502975a785c7f1799af12fd0764aa3/boto3-1.42.86.tar.gz", hash = "sha256:c87d2a750b1a8cad0384d1a83d3bad6aedf924ae9a14aaba814bcb3297b39c01", size = 112783, upload-time = "2026-04-09T01:00:47.129Z" } +sdist = { url = "https://files.pythonhosted.org/packages/98/36/028c12ed6ed85009a21b5472eb76c27f9b0341c6986f06f83475b40aaf51/boto3-1.43.1.tar.gz", hash = "sha256:9e4f85a7884797ff0f52c257094730ed228aaa07fa8134775ff8f86909cf4f2a", size = 113175, upload-time = "2026-04-30T20:27:04.569Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c9/bd/2f51c3491e01fd6fbc62c148da4c9594dacf9ef979080eaeff5e3bc0027f/boto3-1.42.86-py3-none-any.whl", hash = "sha256:492c3c7cbbe9842882680064902f50cf711b5ab770d26525549872339ed95d5b", size = 140557, upload-time = 
"2026-04-09T01:00:44.202Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d1/b8b2d5420c51cd8f7ec044ceecbf24b060156680b26519e1d482e160c3c8/boto3-1.43.1-py3-none-any.whl", hash = "sha256:3840bf0345b9aefcc5915176a19d227f63cfba7778c65e6e52d61c6ea0a10fdc", size = 140498, upload-time = "2026-04-30T20:27:01.791Z" }, ] [[package]] name = "botocore" -version = "1.42.86" +version = "1.43.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "jmespath" }, { name = "python-dateutil" }, { name = "urllib3" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/d0/8c/a99259dbd8734e5e3f57cf223e225457e9c6be3821e6310519df2d362234/botocore-1.42.86.tar.gz", hash = "sha256:baa49e93b4c92d63e0c8288026ee1ef8de83f182743127cc9175504440a48e49", size = 15176910, upload-time = "2026-04-09T01:00:34.636Z" } +sdist = { url = "https://files.pythonhosted.org/packages/03/b7/416ae6f1461d6fec3b3aaffc4759371319c71a21f7ab4c3106ee574fda8d/botocore-1.43.1.tar.gz", hash = "sha256:270d6357d662550fdb84973ec247e02bece0b6283d90bf37319c7753515336e4", size = 15296915, upload-time = "2026-04-30T20:26:50.962Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ae/63/af7dda21ea68b8f85013e3f253c48435cacf07e41face86032d217df82a2/botocore-1.42.86-py3-none-any.whl", hash = "sha256:443387337864e069f7e4e885ccdc81592725b5598ca966514af3e9776bce0bfe", size = 14857738, upload-time = "2026-04-09T01:00:30.166Z" }, + { url = "https://files.pythonhosted.org/packages/09/48/dc2290d2af8b1dc3a44d210555a90f0cb76ef913c52b0c4f31a43cce27b8/botocore-1.43.1-py3-none-any.whl", hash = "sha256:955edc6a398b9c4100cf0d5a31433fdba3835500bf38c1ef171e6e75f4b477d2", size = 14979119, upload-time = "2026-04-30T20:26:46.031Z" }, ] [[package]] @@ -635,62 +641,62 @@ toml = [ [[package]] name = "cryptography" -version = "46.0.5" +version = "46.0.7" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, { name = 
"typing-extensions", marker = "python_full_version < '3.11'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/60/04/ee2a9e8542e4fa2773b81771ff8349ff19cdd56b7258a0cc442639052edb/cryptography-46.0.5.tar.gz", hash = "sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d", size = 750064, upload-time = "2026-02-10T19:18:38.255Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/f7/81/b0bb27f2ba931a65409c6b8a8b358a7f03c0e46eceacddff55f7c84b1f3b/cryptography-46.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:351695ada9ea9618b3500b490ad54c739860883df6c1f555e088eaf25b1bbaad", size = 7176289, upload-time = "2026-02-10T19:17:08.274Z" }, - { url = "https://files.pythonhosted.org/packages/ff/9e/6b4397a3e3d15123de3b1806ef342522393d50736c13b20ec4c9ea6693a6/cryptography-46.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c18ff11e86df2e28854939acde2d003f7984f721eba450b56a200ad90eeb0e6b", size = 4275637, upload-time = "2026-02-10T19:17:10.53Z" }, - { url = "https://files.pythonhosted.org/packages/63/e7/471ab61099a3920b0c77852ea3f0ea611c9702f651600397ac567848b897/cryptography-46.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d7e3d356b8cd4ea5aff04f129d5f66ebdc7b6f8eae802b93739ed520c47c79b", size = 4424742, upload-time = "2026-02-10T19:17:12.388Z" }, - { url = "https://files.pythonhosted.org/packages/37/53/a18500f270342d66bf7e4d9f091114e31e5ee9e7375a5aba2e85a91e0044/cryptography-46.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:50bfb6925eff619c9c023b967d5b77a54e04256c4281b0e21336a130cd7fc263", size = 4277528, upload-time = "2026-02-10T19:17:13.853Z" }, - { url = "https://files.pythonhosted.org/packages/22/29/c2e812ebc38c57b40e7c583895e73c8c5adb4d1e4a0cc4c5a4fdab2b1acc/cryptography-46.0.5-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:803812e111e75d1aa73690d2facc295eaefd4439be1023fefc4995eaea2af90d", size = 4947993, upload-time = 
"2026-02-10T19:17:15.618Z" }, - { url = "https://files.pythonhosted.org/packages/6b/e7/237155ae19a9023de7e30ec64e5d99a9431a567407ac21170a046d22a5a3/cryptography-46.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ee190460e2fbe447175cda91b88b84ae8322a104fc27766ad09428754a618ed", size = 4456855, upload-time = "2026-02-10T19:17:17.221Z" }, - { url = "https://files.pythonhosted.org/packages/2d/87/fc628a7ad85b81206738abbd213b07702bcbdada1dd43f72236ef3cffbb5/cryptography-46.0.5-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:f145bba11b878005c496e93e257c1e88f154d278d2638e6450d17e0f31e558d2", size = 3984635, upload-time = "2026-02-10T19:17:18.792Z" }, - { url = "https://files.pythonhosted.org/packages/84/29/65b55622bde135aedf4565dc509d99b560ee4095e56989e815f8fd2aa910/cryptography-46.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9251e3be159d1020c4030bd2e5f84d6a43fe54b6c19c12f51cde9542a2817b2", size = 4277038, upload-time = "2026-02-10T19:17:20.256Z" }, - { url = "https://files.pythonhosted.org/packages/bc/36/45e76c68d7311432741faf1fbf7fac8a196a0a735ca21f504c75d37e2558/cryptography-46.0.5-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:47fb8a66058b80e509c47118ef8a75d14c455e81ac369050f20ba0d23e77fee0", size = 4912181, upload-time = "2026-02-10T19:17:21.825Z" }, - { url = "https://files.pythonhosted.org/packages/6d/1a/c1ba8fead184d6e3d5afcf03d569acac5ad063f3ac9fb7258af158f7e378/cryptography-46.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4c3341037c136030cb46e4b1e17b7418ea4cbd9dd207e4a6f3b2b24e0d4ac731", size = 4456482, upload-time = "2026-02-10T19:17:25.133Z" }, - { url = "https://files.pythonhosted.org/packages/f9/e5/3fb22e37f66827ced3b902cf895e6a6bc1d095b5b26be26bd13c441fdf19/cryptography-46.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:890bcb4abd5a2d3f852196437129eb3667d62630333aacc13dfd470fad3aaa82", size = 4405497, upload-time = "2026-02-10T19:17:26.66Z" }, - { url = 
"https://files.pythonhosted.org/packages/1a/df/9d58bb32b1121a8a2f27383fabae4d63080c7ca60b9b5c88be742be04ee7/cryptography-46.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80a8d7bfdf38f87ca30a5391c0c9ce4ed2926918e017c29ddf643d0ed2778ea1", size = 4667819, upload-time = "2026-02-10T19:17:28.569Z" }, - { url = "https://files.pythonhosted.org/packages/ea/ed/325d2a490c5e94038cdb0117da9397ece1f11201f425c4e9c57fe5b9f08b/cryptography-46.0.5-cp311-abi3-win32.whl", hash = "sha256:60ee7e19e95104d4c03871d7d7dfb3d22ef8a9b9c6778c94e1c8fcc8365afd48", size = 3028230, upload-time = "2026-02-10T19:17:30.518Z" }, - { url = "https://files.pythonhosted.org/packages/e9/5a/ac0f49e48063ab4255d9e3b79f5def51697fce1a95ea1370f03dc9db76f6/cryptography-46.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:38946c54b16c885c72c4f59846be9743d699eee2b69b6988e0a00a01f46a61a4", size = 3480909, upload-time = "2026-02-10T19:17:32.083Z" }, - { url = "https://files.pythonhosted.org/packages/00/13/3d278bfa7a15a96b9dc22db5a12ad1e48a9eb3d40e1827ef66a5df75d0d0/cryptography-46.0.5-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:94a76daa32eb78d61339aff7952ea819b1734b46f73646a07decb40e5b3448e2", size = 7119287, upload-time = "2026-02-10T19:17:33.801Z" }, - { url = "https://files.pythonhosted.org/packages/67/c8/581a6702e14f0898a0848105cbefd20c058099e2c2d22ef4e476dfec75d7/cryptography-46.0.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5be7bf2fb40769e05739dd0046e7b26f9d4670badc7b032d6ce4db64dddc0678", size = 4265728, upload-time = "2026-02-10T19:17:35.569Z" }, - { url = "https://files.pythonhosted.org/packages/dd/4a/ba1a65ce8fc65435e5a849558379896c957870dd64fecea97b1ad5f46a37/cryptography-46.0.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe346b143ff9685e40192a4960938545c699054ba11d4f9029f94751e3f71d87", size = 4408287, upload-time = "2026-02-10T19:17:36.938Z" }, - { url = 
"https://files.pythonhosted.org/packages/f8/67/8ffdbf7b65ed1ac224d1c2df3943553766914a8ca718747ee3871da6107e/cryptography-46.0.5-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:c69fd885df7d089548a42d5ec05be26050ebcd2283d89b3d30676eb32ff87dee", size = 4270291, upload-time = "2026-02-10T19:17:38.748Z" }, - { url = "https://files.pythonhosted.org/packages/f8/e5/f52377ee93bc2f2bba55a41a886fd208c15276ffbd2569f2ddc89d50e2c5/cryptography-46.0.5-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:8293f3dea7fc929ef7240796ba231413afa7b68ce38fd21da2995549f5961981", size = 4927539, upload-time = "2026-02-10T19:17:40.241Z" }, - { url = "https://files.pythonhosted.org/packages/3b/02/cfe39181b02419bbbbcf3abdd16c1c5c8541f03ca8bda240debc467d5a12/cryptography-46.0.5-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:1abfdb89b41c3be0365328a410baa9df3ff8a9110fb75e7b52e66803ddabc9a9", size = 4442199, upload-time = "2026-02-10T19:17:41.789Z" }, - { url = "https://files.pythonhosted.org/packages/c0/96/2fcaeb4873e536cf71421a388a6c11b5bc846e986b2b069c79363dc1648e/cryptography-46.0.5-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:d66e421495fdb797610a08f43b05269e0a5ea7f5e652a89bfd5a7d3c1dee3648", size = 3960131, upload-time = "2026-02-10T19:17:43.379Z" }, - { url = "https://files.pythonhosted.org/packages/d8/d2/b27631f401ddd644e94c5cf33c9a4069f72011821cf3dc7309546b0642a0/cryptography-46.0.5-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:4e817a8920bfbcff8940ecfd60f23d01836408242b30f1a708d93198393a80b4", size = 4270072, upload-time = "2026-02-10T19:17:45.481Z" }, - { url = "https://files.pythonhosted.org/packages/f4/a7/60d32b0370dae0b4ebe55ffa10e8599a2a59935b5ece1b9f06edb73abdeb/cryptography-46.0.5-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:68f68d13f2e1cb95163fa3b4db4bf9a159a418f5f6e7242564fc75fcae667fd0", size = 4892170, upload-time = "2026-02-10T19:17:46.997Z" }, - { url = 
"https://files.pythonhosted.org/packages/d2/b9/cf73ddf8ef1164330eb0b199a589103c363afa0cf794218c24d524a58eab/cryptography-46.0.5-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a3d1fae9863299076f05cb8a778c467578262fae09f9dc0ee9b12eb4268ce663", size = 4441741, upload-time = "2026-02-10T19:17:48.661Z" }, - { url = "https://files.pythonhosted.org/packages/5f/eb/eee00b28c84c726fe8fa0158c65afe312d9c3b78d9d01daf700f1f6e37ff/cryptography-46.0.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4143987a42a2397f2fc3b4d7e3a7d313fbe684f67ff443999e803dd75a76826", size = 4396728, upload-time = "2026-02-10T19:17:50.058Z" }, - { url = "https://files.pythonhosted.org/packages/65/f4/6bc1a9ed5aef7145045114b75b77c2a8261b4d38717bd8dea111a63c3442/cryptography-46.0.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7d731d4b107030987fd61a7f8ab512b25b53cef8f233a97379ede116f30eb67d", size = 4652001, upload-time = "2026-02-10T19:17:51.54Z" }, - { url = "https://files.pythonhosted.org/packages/86/ef/5d00ef966ddd71ac2e6951d278884a84a40ffbd88948ef0e294b214ae9e4/cryptography-46.0.5-cp314-cp314t-win32.whl", hash = "sha256:c3bcce8521d785d510b2aad26ae2c966092b7daa8f45dd8f44734a104dc0bc1a", size = 3003637, upload-time = "2026-02-10T19:17:52.997Z" }, - { url = "https://files.pythonhosted.org/packages/b7/57/f3f4160123da6d098db78350fdfd9705057aad21de7388eacb2401dceab9/cryptography-46.0.5-cp314-cp314t-win_amd64.whl", hash = "sha256:4d8ae8659ab18c65ced284993c2265910f6c9e650189d4e3f68445ef82a810e4", size = 3469487, upload-time = "2026-02-10T19:17:54.549Z" }, - { url = "https://files.pythonhosted.org/packages/e2/fa/a66aa722105ad6a458bebd64086ca2b72cdd361fed31763d20390f6f1389/cryptography-46.0.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4108d4c09fbbf2789d0c926eb4152ae1760d5a2d97612b92d508d96c861e4d31", size = 7170514, upload-time = "2026-02-10T19:17:56.267Z" }, - { url = 
"https://files.pythonhosted.org/packages/0f/04/c85bdeab78c8bc77b701bf0d9bdcf514c044e18a46dcff330df5448631b0/cryptography-46.0.5-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1f30a86d2757199cb2d56e48cce14deddf1f9c95f1ef1b64ee91ea43fe2e18", size = 4275349, upload-time = "2026-02-10T19:17:58.419Z" }, - { url = "https://files.pythonhosted.org/packages/5c/32/9b87132a2f91ee7f5223b091dc963055503e9b442c98fc0b8a5ca765fab0/cryptography-46.0.5-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:039917b0dc418bb9f6edce8a906572d69e74bd330b0b3fea4f79dab7f8ddd235", size = 4420667, upload-time = "2026-02-10T19:18:00.619Z" }, - { url = "https://files.pythonhosted.org/packages/a1/a6/a7cb7010bec4b7c5692ca6f024150371b295ee1c108bdc1c400e4c44562b/cryptography-46.0.5-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ba2a27ff02f48193fc4daeadf8ad2590516fa3d0adeeb34336b96f7fa64c1e3a", size = 4276980, upload-time = "2026-02-10T19:18:02.379Z" }, - { url = "https://files.pythonhosted.org/packages/8e/7c/c4f45e0eeff9b91e3f12dbd0e165fcf2a38847288fcfd889deea99fb7b6d/cryptography-46.0.5-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:61aa400dce22cb001a98014f647dc21cda08f7915ceb95df0c9eaf84b4b6af76", size = 4939143, upload-time = "2026-02-10T19:18:03.964Z" }, - { url = "https://files.pythonhosted.org/packages/37/19/e1b8f964a834eddb44fa1b9a9976f4e414cbb7aa62809b6760c8803d22d1/cryptography-46.0.5-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ce58ba46e1bc2aac4f7d9290223cead56743fa6ab94a5d53292ffaac6a91614", size = 4453674, upload-time = "2026-02-10T19:18:05.588Z" }, - { url = "https://files.pythonhosted.org/packages/db/ed/db15d3956f65264ca204625597c410d420e26530c4e2943e05a0d2f24d51/cryptography-46.0.5-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:420d0e909050490d04359e7fdb5ed7e667ca5c3c402b809ae2563d7e66a92229", size = 3978801, upload-time = "2026-02-10T19:18:07.167Z" }, - { url = 
"https://files.pythonhosted.org/packages/41/e2/df40a31d82df0a70a0daf69791f91dbb70e47644c58581d654879b382d11/cryptography-46.0.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:582f5fcd2afa31622f317f80426a027f30dc792e9c80ffee87b993200ea115f1", size = 4276755, upload-time = "2026-02-10T19:18:09.813Z" }, - { url = "https://files.pythonhosted.org/packages/33/45/726809d1176959f4a896b86907b98ff4391a8aa29c0aaaf9450a8a10630e/cryptography-46.0.5-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:bfd56bb4b37ed4f330b82402f6f435845a5f5648edf1ad497da51a8452d5d62d", size = 4901539, upload-time = "2026-02-10T19:18:11.263Z" }, - { url = "https://files.pythonhosted.org/packages/99/0f/a3076874e9c88ecb2ecc31382f6e7c21b428ede6f55aafa1aa272613e3cd/cryptography-46.0.5-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a3d507bb6a513ca96ba84443226af944b0f7f47dcc9a399d110cd6146481d24c", size = 4452794, upload-time = "2026-02-10T19:18:12.914Z" }, - { url = "https://files.pythonhosted.org/packages/02/ef/ffeb542d3683d24194a38f66ca17c0a4b8bf10631feef44a7ef64e631b1a/cryptography-46.0.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f16fbdf4da055efb21c22d81b89f155f02ba420558db21288b3d0035bafd5f4", size = 4404160, upload-time = "2026-02-10T19:18:14.375Z" }, - { url = "https://files.pythonhosted.org/packages/96/93/682d2b43c1d5f1406ed048f377c0fc9fc8f7b0447a478d5c65ab3d3a66eb/cryptography-46.0.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:ced80795227d70549a411a4ab66e8ce307899fad2220ce5ab2f296e687eacde9", size = 4667123, upload-time = "2026-02-10T19:18:15.886Z" }, - { url = "https://files.pythonhosted.org/packages/45/2d/9c5f2926cb5300a8eefc3f4f0b3f3df39db7f7ce40c8365444c49363cbda/cryptography-46.0.5-cp38-abi3-win32.whl", hash = "sha256:02f547fce831f5096c9a567fd41bc12ca8f11df260959ecc7c3202555cc47a72", size = 3010220, upload-time = "2026-02-10T19:18:17.361Z" }, - { url = 
"https://files.pythonhosted.org/packages/48/ef/0c2f4a8e31018a986949d34a01115dd057bf536905dca38897bacd21fac3/cryptography-46.0.5-cp38-abi3-win_amd64.whl", hash = "sha256:556e106ee01aa13484ce9b0239bca667be5004efb0aabbed28d353df86445595", size = 3467050, upload-time = "2026-02-10T19:18:18.899Z" }, - { url = "https://files.pythonhosted.org/packages/eb/dd/2d9fdb07cebdf3d51179730afb7d5e576153c6744c3ff8fded23030c204e/cryptography-46.0.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:3b4995dc971c9fb83c25aa44cf45f02ba86f71ee600d81091c2f0cbae116b06c", size = 3476964, upload-time = "2026-02-10T19:18:20.687Z" }, - { url = "https://files.pythonhosted.org/packages/e9/6f/6cc6cc9955caa6eaf83660b0da2b077c7fe8ff9950a3c5e45d605038d439/cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:bc84e875994c3b445871ea7181d424588171efec3e185dced958dad9e001950a", size = 4218321, upload-time = "2026-02-10T19:18:22.349Z" }, - { url = "https://files.pythonhosted.org/packages/3e/5d/c4da701939eeee699566a6c1367427ab91a8b7088cc2328c09dbee940415/cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:2ae6971afd6246710480e3f15824ed3029a60fc16991db250034efd0b9fb4356", size = 4381786, upload-time = "2026-02-10T19:18:24.529Z" }, - { url = "https://files.pythonhosted.org/packages/ac/97/a538654732974a94ff96c1db621fa464f455c02d4bb7d2652f4edc21d600/cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:d861ee9e76ace6cf36a6a89b959ec08e7bc2493ee39d07ffe5acb23ef46d27da", size = 4217990, upload-time = "2026-02-10T19:18:25.957Z" }, - { url = "https://files.pythonhosted.org/packages/ae/11/7e500d2dd3ba891197b9efd2da5454b74336d64a7cc419aa7327ab74e5f6/cryptography-46.0.5-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:2b7a67c9cd56372f3249b39699f2ad479f6991e62ea15800973b956f4b73e257", size = 4381252, upload-time = "2026-02-10T19:18:27.496Z" }, - { url = 
"https://files.pythonhosted.org/packages/bc/58/6b3d24e6b9bc474a2dcdee65dfd1f008867015408a271562e4b690561a4d/cryptography-46.0.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:8456928655f856c6e1533ff59d5be76578a7157224dbd9ce6872f25055ab9ab7", size = 3407605, upload-time = "2026-02-10T19:18:29.233Z" }, +sdist = { url = "https://files.pythonhosted.org/packages/47/93/ac8f3d5ff04d54bc814e961a43ae5b0b146154c89c61b47bb07557679b18/cryptography-46.0.7.tar.gz", hash = "sha256:e4cfd68c5f3e0bfdad0d38e023239b96a2fe84146481852dffbcca442c245aa5", size = 750652, upload-time = "2026-04-08T01:57:54.692Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0b/5d/4a8f770695d73be252331e60e526291e3df0c9b27556a90a6b47bccca4c2/cryptography-46.0.7-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:ea42cbe97209df307fdc3b155f1b6fa2577c0defa8f1f7d3be7d31d189108ad4", size = 7179869, upload-time = "2026-04-08T01:56:17.157Z" }, + { url = "https://files.pythonhosted.org/packages/5f/45/6d80dc379b0bbc1f9d1e429f42e4cb9e1d319c7a8201beffd967c516ea01/cryptography-46.0.7-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b36a4695e29fe69215d75960b22577197aca3f7a25b9cf9d165dcfe9d80bc325", size = 4275492, upload-time = "2026-04-08T01:56:19.36Z" }, + { url = "https://files.pythonhosted.org/packages/4a/9a/1765afe9f572e239c3469f2cb429f3ba7b31878c893b246b4b2994ffe2fe/cryptography-46.0.7-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5ad9ef796328c5e3c4ceed237a183f5d41d21150f972455a9d926593a1dcb308", size = 4426670, upload-time = "2026-04-08T01:56:21.415Z" }, + { url = "https://files.pythonhosted.org/packages/8f/3e/af9246aaf23cd4ee060699adab1e47ced3f5f7e7a8ffdd339f817b446462/cryptography-46.0.7-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:73510b83623e080a2c35c62c15298096e2a5dc8d51c3b4e1740211839d0dea77", size = 4280275, upload-time = "2026-04-08T01:56:23.539Z" }, + { url = 
"https://files.pythonhosted.org/packages/0f/54/6bbbfc5efe86f9d71041827b793c24811a017c6ac0fd12883e4caa86b8ed/cryptography-46.0.7-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:cbd5fb06b62bd0721e1170273d3f4d5a277044c47ca27ee257025146c34cbdd1", size = 4928402, upload-time = "2026-04-08T01:56:25.624Z" }, + { url = "https://files.pythonhosted.org/packages/2d/cf/054b9d8220f81509939599c8bdbc0c408dbd2bdd41688616a20731371fe0/cryptography-46.0.7-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:420b1e4109cc95f0e5700eed79908cef9268265c773d3a66f7af1eef53d409ef", size = 4459985, upload-time = "2026-04-08T01:56:27.309Z" }, + { url = "https://files.pythonhosted.org/packages/f9/46/4e4e9c6040fb01c7467d47217d2f882daddeb8828f7df800cb806d8a2288/cryptography-46.0.7-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:24402210aa54baae71d99441d15bb5a1919c195398a87b563df84468160a65de", size = 3990652, upload-time = "2026-04-08T01:56:29.095Z" }, + { url = "https://files.pythonhosted.org/packages/36/5f/313586c3be5a2fbe87e4c9a254207b860155a8e1f3cca99f9910008e7d08/cryptography-46.0.7-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:8a469028a86f12eb7d2fe97162d0634026d92a21f3ae0ac87ed1c4a447886c83", size = 4279805, upload-time = "2026-04-08T01:56:30.928Z" }, + { url = "https://files.pythonhosted.org/packages/69/33/60dfc4595f334a2082749673386a4d05e4f0cf4df8248e63b2c3437585f2/cryptography-46.0.7-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:9694078c5d44c157ef3162e3bf3946510b857df5a3955458381d1c7cfc143ddb", size = 4892883, upload-time = "2026-04-08T01:56:32.614Z" }, + { url = "https://files.pythonhosted.org/packages/c7/0b/333ddab4270c4f5b972f980adef4faa66951a4aaf646ca067af597f15563/cryptography-46.0.7-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:42a1e5f98abb6391717978baf9f90dc28a743b7d9be7f0751a6f56a75d14065b", size = 4459756, upload-time = "2026-04-08T01:56:34.306Z" }, + { url = 
"https://files.pythonhosted.org/packages/d2/14/633913398b43b75f1234834170947957c6b623d1701ffc7a9600da907e89/cryptography-46.0.7-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:91bbcb08347344f810cbe49065914fe048949648f6bd5c2519f34619142bbe85", size = 4410244, upload-time = "2026-04-08T01:56:35.977Z" }, + { url = "https://files.pythonhosted.org/packages/10/f2/19ceb3b3dc14009373432af0c13f46aa08e3ce334ec6eff13492e1812ccd/cryptography-46.0.7-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5d1c02a14ceb9148cc7816249f64f623fbfee39e8c03b3650d842ad3f34d637e", size = 4674868, upload-time = "2026-04-08T01:56:38.034Z" }, + { url = "https://files.pythonhosted.org/packages/1a/bb/a5c213c19ee94b15dfccc48f363738633a493812687f5567addbcbba9f6f/cryptography-46.0.7-cp311-abi3-win32.whl", hash = "sha256:d23c8ca48e44ee015cd0a54aeccdf9f09004eba9fc96f38c911011d9ff1bd457", size = 3026504, upload-time = "2026-04-08T01:56:39.666Z" }, + { url = "https://files.pythonhosted.org/packages/2b/02/7788f9fefa1d060ca68717c3901ae7fffa21ee087a90b7f23c7a603c32ae/cryptography-46.0.7-cp311-abi3-win_amd64.whl", hash = "sha256:397655da831414d165029da9bc483bed2fe0e75dde6a1523ec2fe63f3c46046b", size = 3488363, upload-time = "2026-04-08T01:56:41.893Z" }, + { url = "https://files.pythonhosted.org/packages/7b/56/15619b210e689c5403bb0540e4cb7dbf11a6bf42e483b7644e471a2812b3/cryptography-46.0.7-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:d151173275e1728cf7839aaa80c34fe550c04ddb27b34f48c232193df8db5842", size = 7119671, upload-time = "2026-04-08T01:56:44Z" }, + { url = "https://files.pythonhosted.org/packages/74/66/e3ce040721b0b5599e175ba91ab08884c75928fbeb74597dd10ef13505d2/cryptography-46.0.7-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:db0f493b9181c7820c8134437eb8b0b4792085d37dbb24da050476ccb664e59c", size = 4268551, upload-time = "2026-04-08T01:56:46.071Z" }, + { url = 
"https://files.pythonhosted.org/packages/03/11/5e395f961d6868269835dee1bafec6a1ac176505a167f68b7d8818431068/cryptography-46.0.7-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ebd6daf519b9f189f85c479427bbd6e9c9037862cf8fe89ee35503bd209ed902", size = 4408887, upload-time = "2026-04-08T01:56:47.718Z" }, + { url = "https://files.pythonhosted.org/packages/40/53/8ed1cf4c3b9c8e611e7122fb56f1c32d09e1fff0f1d77e78d9ff7c82653e/cryptography-46.0.7-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:b7b412817be92117ec5ed95f880defe9cf18a832e8cafacf0a22337dc1981b4d", size = 4271354, upload-time = "2026-04-08T01:56:49.312Z" }, + { url = "https://files.pythonhosted.org/packages/50/46/cf71e26025c2e767c5609162c866a78e8a2915bbcfa408b7ca495c6140c4/cryptography-46.0.7-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:fbfd0e5f273877695cb93baf14b185f4878128b250cc9f8e617ea0c025dfb022", size = 4905845, upload-time = "2026-04-08T01:56:50.916Z" }, + { url = "https://files.pythonhosted.org/packages/c0/ea/01276740375bac6249d0a971ebdf6b4dc9ead0ee0a34ef3b5a88c1a9b0d4/cryptography-46.0.7-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:ffca7aa1d00cf7d6469b988c581598f2259e46215e0140af408966a24cf086ce", size = 4444641, upload-time = "2026-04-08T01:56:52.882Z" }, + { url = "https://files.pythonhosted.org/packages/3d/4c/7d258f169ae71230f25d9f3d06caabcff8c3baf0978e2b7d65e0acac3827/cryptography-46.0.7-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:60627cf07e0d9274338521205899337c5d18249db56865f943cbe753aa96f40f", size = 3967749, upload-time = "2026-04-08T01:56:54.597Z" }, + { url = "https://files.pythonhosted.org/packages/b5/2a/2ea0767cad19e71b3530e4cad9605d0b5e338b6a1e72c37c9c1ceb86c333/cryptography-46.0.7-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:80406c3065e2c55d7f49a9550fe0c49b3f12e5bfff5dedb727e319e1afb9bf99", size = 4270942, upload-time = "2026-04-08T01:56:56.416Z" }, + { url = 
"https://files.pythonhosted.org/packages/41/3d/fe14df95a83319af25717677e956567a105bb6ab25641acaa093db79975d/cryptography-46.0.7-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:c5b1ccd1239f48b7151a65bc6dd54bcfcc15e028c8ac126d3fada09db0e07ef1", size = 4871079, upload-time = "2026-04-08T01:56:58.31Z" }, + { url = "https://files.pythonhosted.org/packages/9c/59/4a479e0f36f8f378d397f4eab4c850b4ffb79a2f0d58704b8fa0703ddc11/cryptography-46.0.7-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:d5f7520159cd9c2154eb61eb67548ca05c5774d39e9c2c4339fd793fe7d097b2", size = 4443999, upload-time = "2026-04-08T01:57:00.508Z" }, + { url = "https://files.pythonhosted.org/packages/28/17/b59a741645822ec6d04732b43c5d35e4ef58be7bfa84a81e5ae6f05a1d33/cryptography-46.0.7-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:fcd8eac50d9138c1d7fc53a653ba60a2bee81a505f9f8850b6b2888555a45d0e", size = 4399191, upload-time = "2026-04-08T01:57:02.654Z" }, + { url = "https://files.pythonhosted.org/packages/59/6a/bb2e166d6d0e0955f1e9ff70f10ec4b2824c9cfcdb4da772c7dd69cc7d80/cryptography-46.0.7-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:65814c60f8cc400c63131584e3e1fad01235edba2614b61fbfbfa954082db0ee", size = 4655782, upload-time = "2026-04-08T01:57:04.592Z" }, + { url = "https://files.pythonhosted.org/packages/95/b6/3da51d48415bcb63b00dc17c2eff3a651b7c4fed484308d0f19b30e8cb2c/cryptography-46.0.7-cp314-cp314t-win32.whl", hash = "sha256:fdd1736fed309b4300346f88f74cd120c27c56852c3838cab416e7a166f67298", size = 3002227, upload-time = "2026-04-08T01:57:06.91Z" }, + { url = "https://files.pythonhosted.org/packages/32/a8/9f0e4ed57ec9cebe506e58db11ae472972ecb0c659e4d52bbaee80ca340a/cryptography-46.0.7-cp314-cp314t-win_amd64.whl", hash = "sha256:e06acf3c99be55aa3b516397fe42f5855597f430add9c17fa46bf2e0fb34c9bb", size = 3475332, upload-time = "2026-04-08T01:57:08.807Z" }, + { url = 
"https://files.pythonhosted.org/packages/a7/7f/cd42fc3614386bc0c12f0cb3c4ae1fc2bbca5c9662dfed031514911d513d/cryptography-46.0.7-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:462ad5cb1c148a22b2e3bcc5ad52504dff325d17daf5df8d88c17dda1f75f2a4", size = 7165618, upload-time = "2026-04-08T01:57:10.645Z" }, + { url = "https://files.pythonhosted.org/packages/a5/d0/36a49f0262d2319139d2829f773f1b97ef8aef7f97e6e5bd21455e5a8fb5/cryptography-46.0.7-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:84d4cced91f0f159a7ddacad249cc077e63195c36aac40b4150e7a57e84fffe7", size = 4270628, upload-time = "2026-04-08T01:57:12.885Z" }, + { url = "https://files.pythonhosted.org/packages/8a/6c/1a42450f464dda6ffbe578a911f773e54dd48c10f9895a23a7e88b3e7db5/cryptography-46.0.7-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:128c5edfe5e5938b86b03941e94fac9ee793a94452ad1365c9fc3f4f62216832", size = 4415405, upload-time = "2026-04-08T01:57:14.923Z" }, + { url = "https://files.pythonhosted.org/packages/9a/92/4ed714dbe93a066dc1f4b4581a464d2d7dbec9046f7c8b7016f5286329e2/cryptography-46.0.7-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:5e51be372b26ef4ba3de3c167cd3d1022934bc838ae9eaad7e644986d2a3d163", size = 4272715, upload-time = "2026-04-08T01:57:16.638Z" }, + { url = "https://files.pythonhosted.org/packages/b7/e6/a26b84096eddd51494bba19111f8fffe976f6a09f132706f8f1bf03f51f7/cryptography-46.0.7-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:cdf1a610ef82abb396451862739e3fc93b071c844399e15b90726ef7470eeaf2", size = 4918400, upload-time = "2026-04-08T01:57:19.021Z" }, + { url = "https://files.pythonhosted.org/packages/c7/08/ffd537b605568a148543ac3c2b239708ae0bd635064bab41359252ef88ed/cryptography-46.0.7-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1d25aee46d0c6f1a501adcddb2d2fee4b979381346a78558ed13e50aa8a59067", size = 4450634, upload-time = "2026-04-08T01:57:21.185Z" }, + { url = 
"https://files.pythonhosted.org/packages/16/01/0cd51dd86ab5b9befe0d031e276510491976c3a80e9f6e31810cce46c4ad/cryptography-46.0.7-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:cdfbe22376065ffcf8be74dc9a909f032df19bc58a699456a21712d6e5eabfd0", size = 3985233, upload-time = "2026-04-08T01:57:22.862Z" }, + { url = "https://files.pythonhosted.org/packages/92/49/819d6ed3a7d9349c2939f81b500a738cb733ab62fbecdbc1e38e83d45e12/cryptography-46.0.7-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:abad9dac36cbf55de6eb49badd4016806b3165d396f64925bf2999bcb67837ba", size = 4271955, upload-time = "2026-04-08T01:57:24.814Z" }, + { url = "https://files.pythonhosted.org/packages/80/07/ad9b3c56ebb95ed2473d46df0847357e01583f4c52a85754d1a55e29e4d0/cryptography-46.0.7-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:935ce7e3cfdb53e3536119a542b839bb94ec1ad081013e9ab9b7cfd478b05006", size = 4879888, upload-time = "2026-04-08T01:57:26.88Z" }, + { url = "https://files.pythonhosted.org/packages/b8/c7/201d3d58f30c4c2bdbe9b03844c291feb77c20511cc3586daf7edc12a47b/cryptography-46.0.7-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:35719dc79d4730d30f1c2b6474bd6acda36ae2dfae1e3c16f2051f215df33ce0", size = 4449961, upload-time = "2026-04-08T01:57:29.068Z" }, + { url = "https://files.pythonhosted.org/packages/a5/ef/649750cbf96f3033c3c976e112265c33906f8e462291a33d77f90356548c/cryptography-46.0.7-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:7bbc6ccf49d05ac8f7d7b5e2e2c33830d4fe2061def88210a126d130d7f71a85", size = 4401696, upload-time = "2026-04-08T01:57:31.029Z" }, + { url = "https://files.pythonhosted.org/packages/41/52/a8908dcb1a389a459a29008c29966c1d552588d4ae6d43f3a1a4512e0ebe/cryptography-46.0.7-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a1529d614f44b863a7b480c6d000fe93b59acee9c82ffa027cfadc77521a9f5e", size = 4664256, upload-time = "2026-04-08T01:57:33.144Z" }, + { url = 
"https://files.pythonhosted.org/packages/4b/fa/f0ab06238e899cc3fb332623f337a7364f36f4bb3f2534c2bb95a35b132c/cryptography-46.0.7-cp38-abi3-win32.whl", hash = "sha256:f247c8c1a1fb45e12586afbb436ef21ff1e80670b2861a90353d9b025583d246", size = 3013001, upload-time = "2026-04-08T01:57:34.933Z" }, + { url = "https://files.pythonhosted.org/packages/d2/f1/00ce3bde3ca542d1acd8f8cfa38e446840945aa6363f9b74746394b14127/cryptography-46.0.7-cp38-abi3-win_amd64.whl", hash = "sha256:506c4ff91eff4f82bdac7633318a526b1d1309fc07ca76a3ad182cb5b686d6d3", size = 3472985, upload-time = "2026-04-08T01:57:36.714Z" }, + { url = "https://files.pythonhosted.org/packages/63/0c/dca8abb64e7ca4f6b2978769f6fea5ad06686a190cec381f0a796fdcaaba/cryptography-46.0.7-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:fc9ab8856ae6cf7c9358430e49b368f3108f050031442eaeb6b9d87e4dcf4e4f", size = 3476879, upload-time = "2026-04-08T01:57:38.664Z" }, + { url = "https://files.pythonhosted.org/packages/3a/ea/075aac6a84b7c271578d81a2f9968acb6e273002408729f2ddff517fed4a/cryptography-46.0.7-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d3b99c535a9de0adced13d159c5a9cf65c325601aa30f4be08afd680643e9c15", size = 4219700, upload-time = "2026-04-08T01:57:40.625Z" }, + { url = "https://files.pythonhosted.org/packages/6c/7b/1c55db7242b5e5612b29fc7a630e91ee7a6e3c8e7bf5406d22e206875fbd/cryptography-46.0.7-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d02c738dacda7dc2a74d1b2b3177042009d5cab7c7079db74afc19e56ca1b455", size = 4385982, upload-time = "2026-04-08T01:57:42.725Z" }, + { url = "https://files.pythonhosted.org/packages/cb/da/9870eec4b69c63ef5925bf7d8342b7e13bc2ee3d47791461c4e49ca212f4/cryptography-46.0.7-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:04959522f938493042d595a736e7dbdff6eb6cc2339c11465b3ff89343b65f65", size = 4219115, upload-time = "2026-04-08T01:57:44.939Z" }, + { url = 
"https://files.pythonhosted.org/packages/f4/72/05aa5832b82dd341969e9a734d1812a6aadb088d9eb6f0430fc337cc5a8f/cryptography-46.0.7-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:3986ac1dee6def53797289999eabe84798ad7817f3e97779b5061a95b0ee4968", size = 4385479, upload-time = "2026-04-08T01:57:46.86Z" }, + { url = "https://files.pythonhosted.org/packages/20/2a/1b016902351a523aa2bd446b50a5bc1175d7a7d1cf90fe2ef904f9b84ebc/cryptography-46.0.7-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:258514877e15963bd43b558917bc9f54cf7cf866c38aa576ebf47a77ddbc43a4", size = 3412829, upload-time = "2026-04-08T01:57:48.874Z" }, ] [[package]] @@ -1482,100 +1488,100 @@ wheels = [ [[package]] name = "pillow" -version = "12.1.1" -source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/1f/42/5c74462b4fd957fcd7b13b04fb3205ff8349236ea74c7c375766d6c82288/pillow-12.1.1.tar.gz", hash = "sha256:9ad8fa5937ab05218e2b6a4cff30295ad35afd2f83ac592e68c0d871bb0fdbc4", size = 46980264, upload-time = "2026-02-11T04:23:07.146Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/1d/30/5bd3d794762481f8c8ae9c80e7b76ecea73b916959eb587521358ef0b2f9/pillow-12.1.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f1625b72740fdda5d77b4def688eb8fd6490975d06b909fd19f13f391e077e0", size = 5304099, upload-time = "2026-02-11T04:20:06.13Z" }, - { url = "https://files.pythonhosted.org/packages/bd/c1/aab9e8f3eeb4490180e357955e15c2ef74b31f64790ff356c06fb6cf6d84/pillow-12.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:178aa072084bd88ec759052feca8e56cbb14a60b39322b99a049e58090479713", size = 4657880, upload-time = "2026-02-11T04:20:09.291Z" }, - { url = "https://files.pythonhosted.org/packages/f1/0a/9879e30d56815ad529d3985aeff5af4964202425c27261a6ada10f7cbf53/pillow-12.1.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b66e95d05ba806247aaa1561f080abc7975daf715c30780ff92a20e4ec546e1b", size = 6222587, 
upload-time = "2026-02-11T04:20:10.82Z" }, - { url = "https://files.pythonhosted.org/packages/5a/5f/a1b72ff7139e4f89014e8d451442c74a774d5c43cd938fb0a9f878576b37/pillow-12.1.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:89c7e895002bbe49cdc5426150377cbbc04767d7547ed145473f496dfa40408b", size = 8027678, upload-time = "2026-02-11T04:20:12.455Z" }, - { url = "https://files.pythonhosted.org/packages/e2/c2/c7cb187dac79a3d22c3ebeae727abee01e077c8c7d930791dc592f335153/pillow-12.1.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a5cbdcddad0af3da87cb16b60d23648bc3b51967eb07223e9fed77a82b457c4", size = 6335777, upload-time = "2026-02-11T04:20:14.441Z" }, - { url = "https://files.pythonhosted.org/packages/0c/7b/f9b09a7804ec7336effb96c26d37c29d27225783dc1501b7d62dcef6ae25/pillow-12.1.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9f51079765661884a486727f0729d29054242f74b46186026582b4e4769918e4", size = 7027140, upload-time = "2026-02-11T04:20:16.387Z" }, - { url = "https://files.pythonhosted.org/packages/98/b2/2fa3c391550bd421b10849d1a2144c44abcd966daadd2f7c12e19ea988c4/pillow-12.1.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:99c1506ea77c11531d75e3a412832a13a71c7ebc8192ab9e4b2e355555920e3e", size = 6449855, upload-time = "2026-02-11T04:20:18.554Z" }, - { url = "https://files.pythonhosted.org/packages/96/ff/9caf4b5b950c669263c39e96c78c0d74a342c71c4f43fd031bb5cb7ceac9/pillow-12.1.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:36341d06738a9f66c8287cf8b876d24b18db9bd8740fa0672c74e259ad408cff", size = 7151329, upload-time = "2026-02-11T04:20:20.646Z" }, - { url = "https://files.pythonhosted.org/packages/7b/f8/4b24841f582704da675ca535935bccb32b00a6da1226820845fac4a71136/pillow-12.1.1-cp310-cp310-win32.whl", hash = "sha256:6c52f062424c523d6c4db85518774cc3d50f5539dd6eed32b8f6229b26f24d40", size = 6325574, upload-time = "2026-02-11T04:20:22.43Z" }, - { url = 
"https://files.pythonhosted.org/packages/f8/f9/9f6b01c0881d7036063aa6612ef04c0e2cad96be21325a1e92d0203f8e91/pillow-12.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:c6008de247150668a705a6338156efb92334113421ceecf7438a12c9a12dab23", size = 7032347, upload-time = "2026-02-11T04:20:23.932Z" }, - { url = "https://files.pythonhosted.org/packages/79/13/c7922edded3dcdaf10c59297540b72785620abc0538872c819915746757d/pillow-12.1.1-cp310-cp310-win_arm64.whl", hash = "sha256:1a9b0ee305220b392e1124a764ee4265bd063e54a751a6b62eff69992f457fa9", size = 2453457, upload-time = "2026-02-11T04:20:25.392Z" }, - { url = "https://files.pythonhosted.org/packages/2b/46/5da1ec4a5171ee7bf1a0efa064aba70ba3d6e0788ce3f5acd1375d23c8c0/pillow-12.1.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e879bb6cd5c73848ef3b2b48b8af9ff08c5b71ecda8048b7dd22d8a33f60be32", size = 5304084, upload-time = "2026-02-11T04:20:27.501Z" }, - { url = "https://files.pythonhosted.org/packages/78/93/a29e9bc02d1cf557a834da780ceccd54e02421627200696fcf805ebdc3fb/pillow-12.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:365b10bb9417dd4498c0e3b128018c4a624dc11c7b97d8cc54effe3b096f4c38", size = 4657866, upload-time = "2026-02-11T04:20:29.827Z" }, - { url = "https://files.pythonhosted.org/packages/13/84/583a4558d492a179d31e4aae32eadce94b9acf49c0337c4ce0b70e0a01f2/pillow-12.1.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d4ce8e329c93845720cd2014659ca67eac35f6433fd3050393d85f3ecef0dad5", size = 6232148, upload-time = "2026-02-11T04:20:31.329Z" }, - { url = "https://files.pythonhosted.org/packages/d5/e2/53c43334bbbb2d3b938978532fbda8e62bb6e0b23a26ce8592f36bcc4987/pillow-12.1.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc354a04072b765eccf2204f588a7a532c9511e8b9c7f900e1b64e3e33487090", size = 8038007, upload-time = "2026-02-11T04:20:34.225Z" }, - { url = 
"https://files.pythonhosted.org/packages/b8/a6/3d0e79c8a9d58150dd98e199d7c1c56861027f3829a3a60b3c2784190180/pillow-12.1.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7e7976bf1910a8116b523b9f9f58bf410f3e8aa330cd9a2bb2953f9266ab49af", size = 6345418, upload-time = "2026-02-11T04:20:35.858Z" }, - { url = "https://files.pythonhosted.org/packages/a2/c8/46dfeac5825e600579157eea177be43e2f7ff4a99da9d0d0a49533509ac5/pillow-12.1.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:597bd9c8419bc7c6af5604e55847789b69123bbe25d65cc6ad3012b4f3c98d8b", size = 7034590, upload-time = "2026-02-11T04:20:37.91Z" }, - { url = "https://files.pythonhosted.org/packages/af/bf/e6f65d3db8a8bbfeaf9e13cc0417813f6319863a73de934f14b2229ada18/pillow-12.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2c1fc0f2ca5f96a3c8407e41cca26a16e46b21060fe6d5b099d2cb01412222f5", size = 6458655, upload-time = "2026-02-11T04:20:39.496Z" }, - { url = "https://files.pythonhosted.org/packages/f9/c2/66091f3f34a25894ca129362e510b956ef26f8fb67a0e6417bc5744e56f1/pillow-12.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:578510d88c6229d735855e1f278aa305270438d36a05031dfaae5067cc8eb04d", size = 7159286, upload-time = "2026-02-11T04:20:41.139Z" }, - { url = "https://files.pythonhosted.org/packages/7b/5a/24bc8eb526a22f957d0cec6243146744966d40857e3d8deb68f7902ca6c1/pillow-12.1.1-cp311-cp311-win32.whl", hash = "sha256:7311c0a0dcadb89b36b7025dfd8326ecfa36964e29913074d47382706e516a7c", size = 6328663, upload-time = "2026-02-11T04:20:43.184Z" }, - { url = "https://files.pythonhosted.org/packages/31/03/bef822e4f2d8f9d7448c133d0a18185d3cce3e70472774fffefe8b0ed562/pillow-12.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:fbfa2a7c10cc2623f412753cddf391c7f971c52ca40a3f65dc5039b2939e8563", size = 7031448, upload-time = "2026-02-11T04:20:44.696Z" }, - { url = 
"https://files.pythonhosted.org/packages/49/70/f76296f53610bd17b2e7d31728b8b7825e3ac3b5b3688b51f52eab7c0818/pillow-12.1.1-cp311-cp311-win_arm64.whl", hash = "sha256:b81b5e3511211631b3f672a595e3221252c90af017e399056d0faabb9538aa80", size = 2453651, upload-time = "2026-02-11T04:20:46.243Z" }, - { url = "https://files.pythonhosted.org/packages/07/d3/8df65da0d4df36b094351dce696f2989bec731d4f10e743b1c5f4da4d3bf/pillow-12.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab323b787d6e18b3d91a72fc99b1a2c28651e4358749842b8f8dfacd28ef2052", size = 5262803, upload-time = "2026-02-11T04:20:47.653Z" }, - { url = "https://files.pythonhosted.org/packages/d6/71/5026395b290ff404b836e636f51d7297e6c83beceaa87c592718747e670f/pillow-12.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:adebb5bee0f0af4909c30db0d890c773d1a92ffe83da908e2e9e720f8edf3984", size = 4657601, upload-time = "2026-02-11T04:20:49.328Z" }, - { url = "https://files.pythonhosted.org/packages/b1/2e/1001613d941c67442f745aff0f7cc66dd8df9a9c084eb497e6a543ee6f7e/pillow-12.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bb66b7cc26f50977108790e2456b7921e773f23db5630261102233eb355a3b79", size = 6234995, upload-time = "2026-02-11T04:20:51.032Z" }, - { url = "https://files.pythonhosted.org/packages/07/26/246ab11455b2549b9233dbd44d358d033a2f780fa9007b61a913c5b2d24e/pillow-12.1.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aee2810642b2898bb187ced9b349e95d2a7272930796e022efaf12e99dccd293", size = 8045012, upload-time = "2026-02-11T04:20:52.882Z" }, - { url = "https://files.pythonhosted.org/packages/b2/8b/07587069c27be7535ac1fe33874e32de118fbd34e2a73b7f83436a88368c/pillow-12.1.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a0b1cd6232e2b618adcc54d9882e4e662a089d5768cd188f7c245b4c8c44a397", size = 6349638, upload-time = "2026-02-11T04:20:54.444Z" }, - { url = 
"https://files.pythonhosted.org/packages/ff/79/6df7b2ee763d619cda2fb4fea498e5f79d984dae304d45a8999b80d6cf5c/pillow-12.1.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7aac39bcf8d4770d089588a2e1dd111cbaa42df5a94be3114222057d68336bd0", size = 7041540, upload-time = "2026-02-11T04:20:55.97Z" }, - { url = "https://files.pythonhosted.org/packages/2c/5e/2ba19e7e7236d7529f4d873bdaf317a318896bac289abebd4bb00ef247f0/pillow-12.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ab174cd7d29a62dd139c44bf74b698039328f45cb03b4596c43473a46656b2f3", size = 6462613, upload-time = "2026-02-11T04:20:57.542Z" }, - { url = "https://files.pythonhosted.org/packages/03/03/31216ec124bb5c3dacd74ce8efff4cc7f52643653bad4825f8f08c697743/pillow-12.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:339ffdcb7cbeaa08221cd401d517d4b1fe7a9ed5d400e4a8039719238620ca35", size = 7166745, upload-time = "2026-02-11T04:20:59.196Z" }, - { url = "https://files.pythonhosted.org/packages/1f/e7/7c4552d80052337eb28653b617eafdef39adfb137c49dd7e831b8dc13bc5/pillow-12.1.1-cp312-cp312-win32.whl", hash = "sha256:5d1f9575a12bed9e9eedd9a4972834b08c97a352bd17955ccdebfeca5913fa0a", size = 6328823, upload-time = "2026-02-11T04:21:01.385Z" }, - { url = "https://files.pythonhosted.org/packages/3d/17/688626d192d7261bbbf98846fc98995726bddc2c945344b65bec3a29d731/pillow-12.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:21329ec8c96c6e979cd0dfd29406c40c1d52521a90544463057d2aaa937d66a6", size = 7033367, upload-time = "2026-02-11T04:21:03.536Z" }, - { url = "https://files.pythonhosted.org/packages/ed/fe/a0ef1f73f939b0eca03ee2c108d0043a87468664770612602c63266a43c4/pillow-12.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:af9a332e572978f0218686636610555ae3defd1633597be015ed50289a03c523", size = 2453811, upload-time = "2026-02-11T04:21:05.116Z" }, - { url = 
"https://files.pythonhosted.org/packages/d5/11/6db24d4bd7685583caeae54b7009584e38da3c3d4488ed4cd25b439de486/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:d242e8ac078781f1de88bf823d70c1a9b3c7950a44cdf4b7c012e22ccbcd8e4e", size = 4062689, upload-time = "2026-02-11T04:21:06.804Z" }, - { url = "https://files.pythonhosted.org/packages/33/c0/ce6d3b1fe190f0021203e0d9b5b99e57843e345f15f9ef22fcd43842fd21/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:02f84dfad02693676692746df05b89cf25597560db2857363a208e393429f5e9", size = 4138535, upload-time = "2026-02-11T04:21:08.452Z" }, - { url = "https://files.pythonhosted.org/packages/a0/c6/d5eb6a4fb32a3f9c21a8c7613ec706534ea1cf9f4b3663e99f0d83f6fca8/pillow-12.1.1-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:e65498daf4b583091ccbb2556c7000abf0f3349fcd57ef7adc9a84a394ed29f6", size = 3601364, upload-time = "2026-02-11T04:21:10.194Z" }, - { url = "https://files.pythonhosted.org/packages/14/a1/16c4b823838ba4c9c52c0e6bbda903a3fe5a1bdbf1b8eb4fff7156f3e318/pillow-12.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c6db3b84c87d48d0088943bf33440e0c42370b99b1c2a7989216f7b42eede60", size = 5262561, upload-time = "2026-02-11T04:21:11.742Z" }, - { url = "https://files.pythonhosted.org/packages/bb/ad/ad9dc98ff24f485008aa5cdedaf1a219876f6f6c42a4626c08bc4e80b120/pillow-12.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8b7e5304e34942bf62e15184219a7b5ad4ff7f3bb5cca4d984f37df1a0e1aee2", size = 4657460, upload-time = "2026-02-11T04:21:13.786Z" }, - { url = "https://files.pythonhosted.org/packages/9e/1b/f1a4ea9a895b5732152789326202a82464d5254759fbacae4deea3069334/pillow-12.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:18e5bddd742a44b7e6b1e773ab5db102bd7a94c32555ba656e76d319d19c3850", size = 6232698, upload-time = "2026-02-11T04:21:15.949Z" }, - { url = 
"https://files.pythonhosted.org/packages/95/f4/86f51b8745070daf21fd2e5b1fe0eb35d4db9ca26e6d58366562fb56a743/pillow-12.1.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc44ef1f3de4f45b50ccf9136999d71abb99dca7706bc75d222ed350b9fd2289", size = 8041706, upload-time = "2026-02-11T04:21:17.723Z" }, - { url = "https://files.pythonhosted.org/packages/29/9b/d6ecd956bb1266dd1045e995cce9b8d77759e740953a1c9aad9502a0461e/pillow-12.1.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a8eb7ed8d4198bccbd07058416eeec51686b498e784eda166395a23eb99138e", size = 6346621, upload-time = "2026-02-11T04:21:19.547Z" }, - { url = "https://files.pythonhosted.org/packages/71/24/538bff45bde96535d7d998c6fed1a751c75ac7c53c37c90dc2601b243893/pillow-12.1.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47b94983da0c642de92ced1702c5b6c292a84bd3a8e1d1702ff923f183594717", size = 7038069, upload-time = "2026-02-11T04:21:21.378Z" }, - { url = "https://files.pythonhosted.org/packages/94/0e/58cb1a6bc48f746bc4cb3adb8cabff73e2742c92b3bf7a220b7cf69b9177/pillow-12.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:518a48c2aab7ce596d3bf79d0e275661b846e86e4d0e7dec34712c30fe07f02a", size = 6460040, upload-time = "2026-02-11T04:21:23.148Z" }, - { url = "https://files.pythonhosted.org/packages/6c/57/9045cb3ff11eeb6c1adce3b2d60d7d299d7b273a2e6c8381a524abfdc474/pillow-12.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a550ae29b95c6dc13cf69e2c9dc5747f814c54eeb2e32d683e5e93af56caa029", size = 7164523, upload-time = "2026-02-11T04:21:25.01Z" }, - { url = "https://files.pythonhosted.org/packages/73/f2/9be9cb99f2175f0d4dbadd6616ce1bf068ee54a28277ea1bf1fbf729c250/pillow-12.1.1-cp313-cp313-win32.whl", hash = "sha256:a003d7422449f6d1e3a34e3dd4110c22148336918ddbfc6a32581cd54b2e0b2b", size = 6332552, upload-time = "2026-02-11T04:21:27.238Z" }, - { url = 
"https://files.pythonhosted.org/packages/3f/eb/b0834ad8b583d7d9d42b80becff092082a1c3c156bb582590fcc973f1c7c/pillow-12.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:344cf1e3dab3be4b1fa08e449323d98a2a3f819ad20f4b22e77a0ede31f0faa1", size = 7040108, upload-time = "2026-02-11T04:21:29.462Z" }, - { url = "https://files.pythonhosted.org/packages/d5/7d/fc09634e2aabdd0feabaff4a32f4a7d97789223e7c2042fd805ea4b4d2c2/pillow-12.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:5c0dd1636633e7e6a0afe7bf6a51a14992b7f8e60de5789018ebbdfae55b040a", size = 2453712, upload-time = "2026-02-11T04:21:31.072Z" }, - { url = "https://files.pythonhosted.org/packages/19/2a/b9d62794fc8a0dd14c1943df68347badbd5511103e0d04c035ffe5cf2255/pillow-12.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0330d233c1a0ead844fc097a7d16c0abff4c12e856c0b325f231820fee1f39da", size = 5264880, upload-time = "2026-02-11T04:21:32.865Z" }, - { url = "https://files.pythonhosted.org/packages/26/9d/e03d857d1347fa5ed9247e123fcd2a97b6220e15e9cb73ca0a8d91702c6e/pillow-12.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dae5f21afb91322f2ff791895ddd8889e5e947ff59f71b46041c8ce6db790bc", size = 4660616, upload-time = "2026-02-11T04:21:34.97Z" }, - { url = "https://files.pythonhosted.org/packages/f7/ec/8a6d22afd02570d30954e043f09c32772bfe143ba9285e2fdb11284952cd/pillow-12.1.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e0c664be47252947d870ac0d327fea7e63985a08794758aa8af5b6cb6ec0c9c", size = 6269008, upload-time = "2026-02-11T04:21:36.623Z" }, - { url = "https://files.pythonhosted.org/packages/3d/1d/6d875422c9f28a4a361f495a5f68d9de4a66941dc2c619103ca335fa6446/pillow-12.1.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:691ab2ac363b8217f7d31b3497108fb1f50faab2f75dfb03284ec2f217e87bf8", size = 8073226, upload-time = "2026-02-11T04:21:38.585Z" }, - { url = 
"https://files.pythonhosted.org/packages/a1/cd/134b0b6ee5eda6dc09e25e24b40fdafe11a520bc725c1d0bbaa5e00bf95b/pillow-12.1.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9e8064fb1cc019296958595f6db671fba95209e3ceb0c4734c9baf97de04b20", size = 6380136, upload-time = "2026-02-11T04:21:40.562Z" }, - { url = "https://files.pythonhosted.org/packages/7a/a9/7628f013f18f001c1b98d8fffe3452f306a70dc6aba7d931019e0492f45e/pillow-12.1.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:472a8d7ded663e6162dafdf20015c486a7009483ca671cece7a9279b512fcb13", size = 7067129, upload-time = "2026-02-11T04:21:42.521Z" }, - { url = "https://files.pythonhosted.org/packages/1e/f8/66ab30a2193b277785601e82ee2d49f68ea575d9637e5e234faaa98efa4c/pillow-12.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:89b54027a766529136a06cfebeecb3a04900397a3590fd252160b888479517bf", size = 6491807, upload-time = "2026-02-11T04:21:44.22Z" }, - { url = "https://files.pythonhosted.org/packages/da/0b/a877a6627dc8318fdb84e357c5e1a758c0941ab1ddffdafd231983788579/pillow-12.1.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:86172b0831b82ce4f7877f280055892b31179e1576aa00d0df3bb1bbf8c3e524", size = 7190954, upload-time = "2026-02-11T04:21:46.114Z" }, - { url = "https://files.pythonhosted.org/packages/83/43/6f732ff85743cf746b1361b91665d9f5155e1483817f693f8d57ea93147f/pillow-12.1.1-cp313-cp313t-win32.whl", hash = "sha256:44ce27545b6efcf0fdbdceb31c9a5bdea9333e664cda58a7e674bb74608b3986", size = 6336441, upload-time = "2026-02-11T04:21:48.22Z" }, - { url = "https://files.pythonhosted.org/packages/3b/44/e865ef3986611bb75bfabdf94a590016ea327833f434558801122979cd0e/pillow-12.1.1-cp313-cp313t-win_amd64.whl", hash = "sha256:a285e3eb7a5a45a2ff504e31f4a8d1b12ef62e84e5411c6804a42197c1cf586c", size = 7045383, upload-time = "2026-02-11T04:21:50.015Z" }, - { url = 
"https://files.pythonhosted.org/packages/a8/c6/f4fb24268d0c6908b9f04143697ea18b0379490cb74ba9e8d41b898bd005/pillow-12.1.1-cp313-cp313t-win_arm64.whl", hash = "sha256:cc7d296b5ea4d29e6570dabeaed58d31c3fea35a633a69679fb03d7664f43fb3", size = 2456104, upload-time = "2026-02-11T04:21:51.633Z" }, - { url = "https://files.pythonhosted.org/packages/03/d0/bebb3ffbf31c5a8e97241476c4cf8b9828954693ce6744b4a2326af3e16b/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:417423db963cb4be8bac3fc1204fe61610f6abeed1580a7a2cbb2fbda20f12af", size = 4062652, upload-time = "2026-02-11T04:21:53.19Z" }, - { url = "https://files.pythonhosted.org/packages/2d/c0/0e16fb0addda4851445c28f8350d8c512f09de27bbb0d6d0bbf8b6709605/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:b957b71c6b2387610f556a7eb0828afbe40b4a98036fc0d2acfa5a44a0c2036f", size = 4138823, upload-time = "2026-02-11T04:22:03.088Z" }, - { url = "https://files.pythonhosted.org/packages/6b/fb/6170ec655d6f6bb6630a013dd7cf7bc218423d7b5fa9071bf63dc32175ae/pillow-12.1.1-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:097690ba1f2efdeb165a20469d59d8bb03c55fb6621eb2041a060ae8ea3e9642", size = 3601143, upload-time = "2026-02-11T04:22:04.909Z" }, - { url = "https://files.pythonhosted.org/packages/59/04/dc5c3f297510ba9a6837cbb318b87dd2b8f73eb41a43cc63767f65cb599c/pillow-12.1.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2815a87ab27848db0321fb78c7f0b2c8649dee134b7f2b80c6a45c6831d75ccd", size = 5266254, upload-time = "2026-02-11T04:22:07.656Z" }, - { url = "https://files.pythonhosted.org/packages/05/30/5db1236b0d6313f03ebf97f5e17cda9ca060f524b2fcc875149a8360b21c/pillow-12.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f7ed2c6543bad5a7d5530eb9e78c53132f93dfa44a28492db88b41cdab885202", size = 4657499, upload-time = "2026-02-11T04:22:09.613Z" }, - { url = 
"https://files.pythonhosted.org/packages/6f/18/008d2ca0eb612e81968e8be0bbae5051efba24d52debf930126d7eaacbba/pillow-12.1.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:652a2c9ccfb556235b2b501a3a7cf3742148cd22e04b5625c5fe057ea3e3191f", size = 6232137, upload-time = "2026-02-11T04:22:11.434Z" }, - { url = "https://files.pythonhosted.org/packages/70/f1/f14d5b8eeb4b2cd62b9f9f847eb6605f103df89ef619ac68f92f748614ea/pillow-12.1.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d6e4571eedf43af33d0fc233a382a76e849badbccdf1ac438841308652a08e1f", size = 8042721, upload-time = "2026-02-11T04:22:13.321Z" }, - { url = "https://files.pythonhosted.org/packages/5a/d6/17824509146e4babbdabf04d8171491fa9d776f7061ff6e727522df9bd03/pillow-12.1.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b574c51cf7d5d62e9be37ba446224b59a2da26dc4c1bb2ecbe936a4fb1a7cb7f", size = 6347798, upload-time = "2026-02-11T04:22:15.449Z" }, - { url = "https://files.pythonhosted.org/packages/d1/ee/c85a38a9ab92037a75615aba572c85ea51e605265036e00c5b67dfafbfe2/pillow-12.1.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a37691702ed687799de29a518d63d4682d9016932db66d4e90c345831b02fb4e", size = 7039315, upload-time = "2026-02-11T04:22:17.24Z" }, - { url = "https://files.pythonhosted.org/packages/ec/f3/bc8ccc6e08a148290d7523bde4d9a0d6c981db34631390dc6e6ec34cacf6/pillow-12.1.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f95c00d5d6700b2b890479664a06e754974848afaae5e21beb4d83c106923fd0", size = 6462360, upload-time = "2026-02-11T04:22:19.111Z" }, - { url = "https://files.pythonhosted.org/packages/f6/ab/69a42656adb1d0665ab051eec58a41f169ad295cf81ad45406963105408f/pillow-12.1.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:559b38da23606e68681337ad74622c4dbba02254fc9cb4488a305dd5975c7eeb", size = 7165438, upload-time = "2026-02-11T04:22:21.041Z" }, - { url = 
"https://files.pythonhosted.org/packages/02/46/81f7aa8941873f0f01d4b55cc543b0a3d03ec2ee30d617a0448bf6bd6dec/pillow-12.1.1-cp314-cp314-win32.whl", hash = "sha256:03edcc34d688572014ff223c125a3f77fb08091e4607e7745002fc214070b35f", size = 6431503, upload-time = "2026-02-11T04:22:22.833Z" }, - { url = "https://files.pythonhosted.org/packages/40/72/4c245f7d1044b67affc7f134a09ea619d4895333d35322b775b928180044/pillow-12.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:50480dcd74fa63b8e78235957d302d98d98d82ccbfac4c7e12108ba9ecbdba15", size = 7176748, upload-time = "2026-02-11T04:22:24.64Z" }, - { url = "https://files.pythonhosted.org/packages/e4/ad/8a87bdbe038c5c698736e3348af5c2194ffb872ea52f11894c95f9305435/pillow-12.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:5cb1785d97b0c3d1d1a16bc1d710c4a0049daefc4935f3a8f31f827f4d3d2e7f", size = 2544314, upload-time = "2026-02-11T04:22:26.685Z" }, - { url = "https://files.pythonhosted.org/packages/6c/9d/efd18493f9de13b87ede7c47e69184b9e859e4427225ea962e32e56a49bc/pillow-12.1.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1f90cff8aa76835cba5769f0b3121a22bd4eb9e6884cfe338216e557a9a548b8", size = 5268612, upload-time = "2026-02-11T04:22:29.884Z" }, - { url = "https://files.pythonhosted.org/packages/f8/f1/4f42eb2b388eb2ffc660dcb7f7b556c1015c53ebd5f7f754965ef997585b/pillow-12.1.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1f1be78ce9466a7ee64bfda57bdba0f7cc499d9794d518b854816c41bf0aa4e9", size = 4660567, upload-time = "2026-02-11T04:22:31.799Z" }, - { url = "https://files.pythonhosted.org/packages/01/54/df6ef130fa43e4b82e32624a7b821a2be1c5653a5fdad8469687a7db4e00/pillow-12.1.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:42fc1f4677106188ad9a55562bbade416f8b55456f522430fadab3cef7cd4e60", size = 6269951, upload-time = "2026-02-11T04:22:33.921Z" }, - { url = 
"https://files.pythonhosted.org/packages/a9/48/618752d06cc44bb4aae8ce0cd4e6426871929ed7b46215638088270d9b34/pillow-12.1.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:98edb152429ab62a1818039744d8fbb3ccab98a7c29fc3d5fcef158f3f1f68b7", size = 8074769, upload-time = "2026-02-11T04:22:35.877Z" }, - { url = "https://files.pythonhosted.org/packages/c3/bd/f1d71eb39a72fa088d938655afba3e00b38018d052752f435838961127d8/pillow-12.1.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d470ab1178551dd17fdba0fef463359c41aaa613cdcd7ff8373f54be629f9f8f", size = 6381358, upload-time = "2026-02-11T04:22:37.698Z" }, - { url = "https://files.pythonhosted.org/packages/64/ef/c784e20b96674ed36a5af839305f55616f8b4f8aa8eeccf8531a6e312243/pillow-12.1.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6408a7b064595afcab0a49393a413732a35788f2a5092fdc6266952ed67de586", size = 7068558, upload-time = "2026-02-11T04:22:39.597Z" }, - { url = "https://files.pythonhosted.org/packages/73/cb/8059688b74422ae61278202c4e1ad992e8a2e7375227be0a21c6b87ca8d5/pillow-12.1.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5d8c41325b382c07799a3682c1c258469ea2ff97103c53717b7893862d0c98ce", size = 6493028, upload-time = "2026-02-11T04:22:42.73Z" }, - { url = "https://files.pythonhosted.org/packages/c6/da/e3c008ed7d2dd1f905b15949325934510b9d1931e5df999bb15972756818/pillow-12.1.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c7697918b5be27424e9ce568193efd13d925c4481dd364e43f5dff72d33e10f8", size = 7191940, upload-time = "2026-02-11T04:22:44.543Z" }, - { url = "https://files.pythonhosted.org/packages/01/4a/9202e8d11714c1fc5951f2e1ef362f2d7fbc595e1f6717971d5dd750e969/pillow-12.1.1-cp314-cp314t-win32.whl", hash = "sha256:d2912fd8114fc5545aa3a4b5576512f64c55a03f3ebcca4c10194d593d43ea36", size = 6438736, upload-time = "2026-02-11T04:22:46.347Z" }, - { url = 
"https://files.pythonhosted.org/packages/f3/ca/cbce2327eb9885476b3957b2e82eb12c866a8b16ad77392864ad601022ce/pillow-12.1.1-cp314-cp314t-win_amd64.whl", hash = "sha256:4ceb838d4bd9dab43e06c363cab2eebf63846d6a4aeaea283bbdfd8f1a8ed58b", size = 7182894, upload-time = "2026-02-11T04:22:48.114Z" }, - { url = "https://files.pythonhosted.org/packages/ec/d2/de599c95ba0a973b94410477f8bf0b6f0b5e67360eb89bcb1ad365258beb/pillow-12.1.1-cp314-cp314t-win_arm64.whl", hash = "sha256:7b03048319bfc6170e93bd60728a1af51d3dd7704935feb228c4d4faab35d334", size = 2546446, upload-time = "2026-02-11T04:22:50.342Z" }, - { url = "https://files.pythonhosted.org/packages/56/11/5d43209aa4cb58e0cc80127956ff1796a68b928e6324bbf06ef4db34367b/pillow-12.1.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:600fd103672b925fe62ed08e0d874ea34d692474df6f4bf7ebe148b30f89f39f", size = 5228606, upload-time = "2026-02-11T04:22:52.106Z" }, - { url = "https://files.pythonhosted.org/packages/5f/d5/3b005b4e4fda6698b371fa6c21b097d4707585d7db99e98d9b0b87ac612a/pillow-12.1.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:665e1b916b043cef294bc54d47bf02d87e13f769bc4bc5fa225a24b3a6c5aca9", size = 4622321, upload-time = "2026-02-11T04:22:53.827Z" }, - { url = "https://files.pythonhosted.org/packages/df/36/ed3ea2d594356fd8037e5a01f6156c74bc8d92dbb0fa60746cc96cabb6e8/pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:495c302af3aad1ca67420ddd5c7bd480c8867ad173528767d906428057a11f0e", size = 5247579, upload-time = "2026-02-11T04:22:56.094Z" }, - { url = "https://files.pythonhosted.org/packages/54/9a/9cc3e029683cf6d20ae5085da0dafc63148e3252c2f13328e553aaa13cfb/pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8fd420ef0c52c88b5a035a0886f367748c72147b2b8f384c9d12656678dfdfa9", size = 6989094, upload-time = "2026-02-11T04:22:58.288Z" }, - { url = 
"https://files.pythonhosted.org/packages/00/98/fc53ab36da80b88df0967896b6c4b4cd948a0dc5aa40a754266aa3ae48b3/pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f975aa7ef9684ce7e2c18a3aa8f8e2106ce1e46b94ab713d156b2898811651d3", size = 5313850, upload-time = "2026-02-11T04:23:00.554Z" }, - { url = "https://files.pythonhosted.org/packages/30/02/00fa585abfd9fe9d73e5f6e554dc36cc2b842898cbfc46d70353dae227f8/pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8089c852a56c2966cf18835db62d9b34fef7ba74c726ad943928d494fa7f4735", size = 5963343, upload-time = "2026-02-11T04:23:02.934Z" }, - { url = "https://files.pythonhosted.org/packages/f2/26/c56ce33ca856e358d27fda9676c055395abddb82c35ac0f593877ed4562e/pillow-12.1.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:cb9bb857b2d057c6dfc72ac5f3b44836924ba15721882ef103cecb40d002d80e", size = 7029880, upload-time = "2026-02-11T04:23:04.783Z" }, +version = "12.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8c/21/c2bcdd5906101a30244eaffc1b6e6ce71a31bd0742a01eb89e660ebfac2d/pillow-12.2.0.tar.gz", hash = "sha256:a830b1a40919539d07806aa58e1b114df53ddd43213d9c8b75847eee6c0182b5", size = 46987819, upload-time = "2026-04-01T14:46:17.687Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3a/aa/d0b28e1c811cd4d5f5c2bfe2e022292bd255ae5744a3b9ac7d6c8f72dd75/pillow-12.2.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:a4e8f36e677d3336f35089648c8955c51c6d386a13cf6ee9c189c5f5bd713a9f", size = 5354355, upload-time = "2026-04-01T14:42:15.402Z" }, + { url = "https://files.pythonhosted.org/packages/27/8e/1d5b39b8ae2bd7650d0c7b6abb9602d16043ead9ebbfef4bc4047454da2a/pillow-12.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e589959f10d9824d39b350472b92f0ce3b443c0a3442ebf41c40cb8361c5b97", size = 4695871, upload-time = "2026-04-01T14:42:18.234Z" }, + { url = 
"https://files.pythonhosted.org/packages/f0/c5/dcb7a6ca6b7d3be41a76958e90018d56c8462166b3ef223150360850c8da/pillow-12.2.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a52edc8bfff4429aaabdf4d9ee0daadbbf8562364f940937b941f87a4290f5ff", size = 6269734, upload-time = "2026-04-01T14:42:20.608Z" }, + { url = "https://files.pythonhosted.org/packages/ea/f1/aa1bb13b2f4eba914e9637893c73f2af8e48d7d4023b9d3750d4c5eb2d0c/pillow-12.2.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:975385f4776fafde056abb318f612ef6285b10a1f12b8570f3647ad0d74b48ec", size = 8076080, upload-time = "2026-04-01T14:42:23.095Z" }, + { url = "https://files.pythonhosted.org/packages/a1/2a/8c79d6a53169937784604a8ae8d77e45888c41537f7f6f65ed1f407fe66d/pillow-12.2.0-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bd9c0c7a0c681a347b3194c500cb1e6ca9cab053ea4d82a5cf45b6b754560136", size = 6382236, upload-time = "2026-04-01T14:42:25.82Z" }, + { url = "https://files.pythonhosted.org/packages/b5/42/bbcb6051030e1e421d103ce7a8ecadf837aa2f39b8f82ef1a8d37c3d4ebc/pillow-12.2.0-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:88d387ff40b3ff7c274947ed3125dedf5262ec6919d83946753b5f3d7c67ea4c", size = 7070220, upload-time = "2026-04-01T14:42:28.68Z" }, + { url = "https://files.pythonhosted.org/packages/3f/e1/c2a7d6dd8cfa6b231227da096fd2d58754bab3603b9d73bf609d3c18b64f/pillow-12.2.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:51c4167c34b0d8ba05b547a3bb23578d0ba17b80a5593f93bd8ecb123dd336a3", size = 6493124, upload-time = "2026-04-01T14:42:31.579Z" }, + { url = "https://files.pythonhosted.org/packages/5f/41/7c8617da5d32e1d2f026e509484fdb6f3ad7efaef1749a0c1928adbb099e/pillow-12.2.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:34c0d99ecccea270c04882cb3b86e7b57296079c9a4aff88cb3b33563d95afaa", size = 7194324, upload-time = "2026-04-01T14:42:34.615Z" }, + { url = 
"https://files.pythonhosted.org/packages/2d/de/a777627e19fd6d62f84070ee1521adde5eeda4855b5cf60fe0b149118bca/pillow-12.2.0-cp310-cp310-win32.whl", hash = "sha256:b85f66ae9eb53e860a873b858b789217ba505e5e405a24b85c0464822fe88032", size = 6376363, upload-time = "2026-04-01T14:42:37.19Z" }, + { url = "https://files.pythonhosted.org/packages/e7/34/fc4cb5204896465842767b96d250c08410f01f2f28afc43b257de842eed5/pillow-12.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:673aa32138f3e7531ccdbca7b3901dba9b70940a19ccecc6a37c77d5fdeb05b5", size = 7083523, upload-time = "2026-04-01T14:42:39.62Z" }, + { url = "https://files.pythonhosted.org/packages/2d/a0/32852d36bc7709f14dc3f64f929a275e958ad8c19a6deba9610d458e28b3/pillow-12.2.0-cp310-cp310-win_arm64.whl", hash = "sha256:3e080565d8d7c671db5802eedfb438e5565ffa40115216eabb8cd52d0ecce024", size = 2463318, upload-time = "2026-04-01T14:42:42.063Z" }, + { url = "https://files.pythonhosted.org/packages/68/e1/748f5663efe6edcfc4e74b2b93edfb9b8b99b67f21a854c3ae416500a2d9/pillow-12.2.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:8be29e59487a79f173507c30ddf57e733a357f67881430449bb32614075a40ab", size = 5354347, upload-time = "2026-04-01T14:42:44.255Z" }, + { url = "https://files.pythonhosted.org/packages/47/a1/d5ff69e747374c33a3b53b9f98cca7889fce1fd03d79cdc4e1bccc6c5a87/pillow-12.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:71cde9a1e1551df7d34a25462fc60325e8a11a82cc2e2f54578e5e9a1e153d65", size = 4695873, upload-time = "2026-04-01T14:42:46.452Z" }, + { url = "https://files.pythonhosted.org/packages/df/21/e3fbdf54408a973c7f7f89a23b2cb97a7ef30c61ab4142af31eee6aebc88/pillow-12.2.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f490f9368b6fc026f021db16d7ec2fbf7d89e2edb42e8ec09d2c60505f5729c7", size = 6280168, upload-time = "2026-04-01T14:42:49.228Z" }, + { url = 
"https://files.pythonhosted.org/packages/d3/f1/00b7278c7dd52b17ad4329153748f87b6756ec195ff786c2bdf12518337d/pillow-12.2.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8bd7903a5f2a4545f6fd5935c90058b89d30045568985a71c79f5fd6edf9b91e", size = 8088188, upload-time = "2026-04-01T14:42:51.735Z" }, + { url = "https://files.pythonhosted.org/packages/ad/cf/220a5994ef1b10e70e85748b75649d77d506499352be135a4989c957b701/pillow-12.2.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3997232e10d2920a68d25191392e3a4487d8183039e1c74c2297f00ed1c50705", size = 6394401, upload-time = "2026-04-01T14:42:54.343Z" }, + { url = "https://files.pythonhosted.org/packages/e9/bd/e51a61b1054f09437acfbc2ff9106c30d1eb76bc1453d428399946781253/pillow-12.2.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e74473c875d78b8e9d5da2a70f7099549f9eb37ded4e2f6a463e60125bccd176", size = 7079655, upload-time = "2026-04-01T14:42:56.954Z" }, + { url = "https://files.pythonhosted.org/packages/6b/3d/45132c57d5fb4b5744567c3817026480ac7fc3ce5d4c47902bc0e7f6f853/pillow-12.2.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:56a3f9c60a13133a98ecff6197af34d7824de9b7b38c3654861a725c970c197b", size = 6503105, upload-time = "2026-04-01T14:42:59.847Z" }, + { url = "https://files.pythonhosted.org/packages/7d/2e/9df2fc1e82097b1df3dce58dc43286aa01068e918c07574711fcc53e6fb4/pillow-12.2.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:90e6f81de50ad6b534cab6e5aef77ff6e37722b2f5d908686f4a5c9eba17a909", size = 7203402, upload-time = "2026-04-01T14:43:02.664Z" }, + { url = "https://files.pythonhosted.org/packages/bd/2e/2941e42858ebb67e50ae741473de81c2984e6eff7b397017623c676e2e8d/pillow-12.2.0-cp311-cp311-win32.whl", hash = "sha256:8c984051042858021a54926eb597d6ee3012393ce9c181814115df4c60b9a808", size = 6378149, upload-time = "2026-04-01T14:43:05.274Z" }, + { url = 
"https://files.pythonhosted.org/packages/69/42/836b6f3cd7f3e5fa10a1f1a5420447c17966044c8fbf589cc0452d5502db/pillow-12.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:6e6b2a0c538fc200b38ff9eb6628228b77908c319a005815f2dde585a0664b60", size = 7082626, upload-time = "2026-04-01T14:43:08.557Z" }, + { url = "https://files.pythonhosted.org/packages/c2/88/549194b5d6f1f494b485e493edc6693c0a16f4ada488e5bd974ed1f42fad/pillow-12.2.0-cp311-cp311-win_arm64.whl", hash = "sha256:9a8a34cc89c67a65ea7437ce257cea81a9dad65b29805f3ecee8c8fe8ff25ffe", size = 2463531, upload-time = "2026-04-01T14:43:10.743Z" }, + { url = "https://files.pythonhosted.org/packages/58/be/7482c8a5ebebbc6470b3eb791812fff7d5e0216c2be3827b30b8bb6603ed/pillow-12.2.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2d192a155bbcec180f8564f693e6fd9bccff5a7af9b32e2e4bf8c9c69dbad6b5", size = 5308279, upload-time = "2026-04-01T14:43:13.246Z" }, + { url = "https://files.pythonhosted.org/packages/d8/95/0a351b9289c2b5cbde0bacd4a83ebc44023e835490a727b2a3bd60ddc0f4/pillow-12.2.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f3f40b3c5a968281fd507d519e444c35f0ff171237f4fdde090dd60699458421", size = 4695490, upload-time = "2026-04-01T14:43:15.584Z" }, + { url = "https://files.pythonhosted.org/packages/de/af/4e8e6869cbed569d43c416fad3dc4ecb944cb5d9492defaed89ddd6fe871/pillow-12.2.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:03e7e372d5240cc23e9f07deca4d775c0817bffc641b01e9c3af208dbd300987", size = 6284462, upload-time = "2026-04-01T14:43:18.268Z" }, + { url = "https://files.pythonhosted.org/packages/e9/9e/c05e19657fd57841e476be1ab46c4d501bffbadbafdc31a6d665f8b737b6/pillow-12.2.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b86024e52a1b269467a802258c25521e6d742349d760728092e1bc2d135b4d76", size = 8094744, upload-time = "2026-04-01T14:43:20.716Z" }, + { url = 
"https://files.pythonhosted.org/packages/2b/54/1789c455ed10176066b6e7e6da1b01e50e36f94ba584dc68d9eebfe9156d/pillow-12.2.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7371b48c4fa448d20d2714c9a1f775a81155050d383333e0a6c15b1123dda005", size = 6398371, upload-time = "2026-04-01T14:43:23.443Z" }, + { url = "https://files.pythonhosted.org/packages/43/e3/fdc657359e919462369869f1c9f0e973f353f9a9ee295a39b1fea8ee1a77/pillow-12.2.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:62f5409336adb0663b7caa0da5c7d9e7bdbaae9ce761d34669420c2a801b2780", size = 7087215, upload-time = "2026-04-01T14:43:26.758Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f8/2f6825e441d5b1959d2ca5adec984210f1ec086435b0ed5f52c19b3b8a6e/pillow-12.2.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:01afa7cf67f74f09523699b4e88c73fb55c13346d212a59a2db1f86b0a63e8c5", size = 6509783, upload-time = "2026-04-01T14:43:29.56Z" }, + { url = "https://files.pythonhosted.org/packages/67/f9/029a27095ad20f854f9dba026b3ea6428548316e057e6fc3545409e86651/pillow-12.2.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fc3d34d4a8fbec3e88a79b92e5465e0f9b842b628675850d860b8bd300b159f5", size = 7212112, upload-time = "2026-04-01T14:43:32.091Z" }, + { url = "https://files.pythonhosted.org/packages/be/42/025cfe05d1be22dbfdb4f264fe9de1ccda83f66e4fc3aac94748e784af04/pillow-12.2.0-cp312-cp312-win32.whl", hash = "sha256:58f62cc0f00fd29e64b29f4fd923ffdb3859c9f9e6105bfc37ba1d08994e8940", size = 6378489, upload-time = "2026-04-01T14:43:34.601Z" }, + { url = "https://files.pythonhosted.org/packages/5d/7b/25a221d2c761c6a8ae21bfa3874988ff2583e19cf8a27bf2fee358df7942/pillow-12.2.0-cp312-cp312-win_amd64.whl", hash = "sha256:7f84204dee22a783350679a0333981df803dac21a0190d706a50475e361c93f5", size = 7084129, upload-time = "2026-04-01T14:43:37.213Z" }, + { url = 
"https://files.pythonhosted.org/packages/10/e1/542a474affab20fd4a0f1836cb234e8493519da6b76899e30bcc5d990b8b/pillow-12.2.0-cp312-cp312-win_arm64.whl", hash = "sha256:af73337013e0b3b46f175e79492d96845b16126ddf79c438d7ea7ff27783a414", size = 2463612, upload-time = "2026-04-01T14:43:39.421Z" }, + { url = "https://files.pythonhosted.org/packages/4a/01/53d10cf0dbad820a8db274d259a37ba50b88b24768ddccec07355382d5ad/pillow-12.2.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:8297651f5b5679c19968abefd6bb84d95fe30ef712eb1b2d9b2d31ca61267f4c", size = 4100837, upload-time = "2026-04-01T14:43:41.506Z" }, + { url = "https://files.pythonhosted.org/packages/0f/98/f3a6657ecb698c937f6c76ee564882945f29b79bad496abcba0e84659ec5/pillow-12.2.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:50d8520da2a6ce0af445fa6d648c4273c3eeefbc32d7ce049f22e8b5c3daecc2", size = 4176528, upload-time = "2026-04-01T14:43:43.773Z" }, + { url = "https://files.pythonhosted.org/packages/69/bc/8986948f05e3ea490b8442ea1c1d4d990b24a7e43d8a51b2c7d8b1dced36/pillow-12.2.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:766cef22385fa1091258ad7e6216792b156dc16d8d3fa607e7545b2b72061f1c", size = 3640401, upload-time = "2026-04-01T14:43:45.87Z" }, + { url = "https://files.pythonhosted.org/packages/34/46/6c717baadcd62bc8ed51d238d521ab651eaa74838291bda1f86fe1f864c9/pillow-12.2.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5d2fd0fa6b5d9d1de415060363433f28da8b1526c1c129020435e186794b3795", size = 5308094, upload-time = "2026-04-01T14:43:48.438Z" }, + { url = "https://files.pythonhosted.org/packages/71/43/905a14a8b17fdb1ccb58d282454490662d2cb89a6bfec26af6d3520da5ec/pillow-12.2.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:56b25336f502b6ed02e889f4ece894a72612fe885889a6e8c4c80239ff6e5f5f", size = 4695402, upload-time = "2026-04-01T14:43:51.292Z" }, + { url = 
"https://files.pythonhosted.org/packages/73/dd/42107efcb777b16fa0393317eac58f5b5cf30e8392e266e76e51cff28c3d/pillow-12.2.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f1c943e96e85df3d3478f7b691f229887e143f81fedab9b20205349ab04d73ed", size = 6280005, upload-time = "2026-04-01T14:43:54.242Z" }, + { url = "https://files.pythonhosted.org/packages/a8/68/b93e09e5e8549019e61acf49f65b1a8530765a7f812c77a7461bca7e4494/pillow-12.2.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:03f6fab9219220f041c74aeaa2939ff0062bd5c364ba9ce037197f4c6d498cd9", size = 8090669, upload-time = "2026-04-01T14:43:57.335Z" }, + { url = "https://files.pythonhosted.org/packages/4b/6e/3ccb54ce8ec4ddd1accd2d89004308b7b0b21c4ac3d20fa70af4760a4330/pillow-12.2.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5cdfebd752ec52bf5bb4e35d9c64b40826bc5b40a13df7c3cda20a2c03a0f5ed", size = 6395194, upload-time = "2026-04-01T14:43:59.864Z" }, + { url = "https://files.pythonhosted.org/packages/67/ee/21d4e8536afd1a328f01b359b4d3997b291ffd35a237c877b331c1c3b71c/pillow-12.2.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:eedf4b74eda2b5a4b2b2fb4c006d6295df3bf29e459e198c90ea48e130dc75c3", size = 7082423, upload-time = "2026-04-01T14:44:02.74Z" }, + { url = "https://files.pythonhosted.org/packages/78/5f/e9f86ab0146464e8c133fe85df987ed9e77e08b29d8d35f9f9f4d6f917ba/pillow-12.2.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:00a2865911330191c0b818c59103b58a5e697cae67042366970a6b6f1b20b7f9", size = 6505667, upload-time = "2026-04-01T14:44:05.381Z" }, + { url = "https://files.pythonhosted.org/packages/ed/1e/409007f56a2fdce61584fd3acbc2bbc259857d555196cedcadc68c015c82/pillow-12.2.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1e1757442ed87f4912397c6d35a0db6a7b52592156014706f17658ff58bbf795", size = 7208580, upload-time = "2026-04-01T14:44:08.39Z" }, + { url = 
"https://files.pythonhosted.org/packages/23/c4/7349421080b12fb35414607b8871e9534546c128a11965fd4a7002ccfbee/pillow-12.2.0-cp313-cp313-win32.whl", hash = "sha256:144748b3af2d1b358d41286056d0003f47cb339b8c43a9ea42f5fea4d8c66b6e", size = 6375896, upload-time = "2026-04-01T14:44:11.197Z" }, + { url = "https://files.pythonhosted.org/packages/3f/82/8a3739a5e470b3c6cbb1d21d315800d8e16bff503d1f16b03a4ec3212786/pillow-12.2.0-cp313-cp313-win_amd64.whl", hash = "sha256:390ede346628ccc626e5730107cde16c42d3836b89662a115a921f28440e6a3b", size = 7081266, upload-time = "2026-04-01T14:44:13.947Z" }, + { url = "https://files.pythonhosted.org/packages/c3/25/f968f618a062574294592f668218f8af564830ccebdd1fa6200f598e65c5/pillow-12.2.0-cp313-cp313-win_arm64.whl", hash = "sha256:8023abc91fba39036dbce14a7d6535632f99c0b857807cbbbf21ecc9f4717f06", size = 2463508, upload-time = "2026-04-01T14:44:16.312Z" }, + { url = "https://files.pythonhosted.org/packages/4d/a4/b342930964e3cb4dce5038ae34b0eab4653334995336cd486c5a8c25a00c/pillow-12.2.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:042db20a421b9bafecc4b84a8b6e444686bd9d836c7fd24542db3e7df7baad9b", size = 5309927, upload-time = "2026-04-01T14:44:18.89Z" }, + { url = "https://files.pythonhosted.org/packages/9f/de/23198e0a65a9cf06123f5435a5d95cea62a635697f8f03d134d3f3a96151/pillow-12.2.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:dd025009355c926a84a612fecf58bb315a3f6814b17ead51a8e48d3823d9087f", size = 4698624, upload-time = "2026-04-01T14:44:21.115Z" }, + { url = "https://files.pythonhosted.org/packages/01/a6/1265e977f17d93ea37aa28aa81bad4fa597933879fac2520d24e021c8da3/pillow-12.2.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:88ddbc66737e277852913bd1e07c150cc7bb124539f94c4e2df5344494e0a612", size = 6321252, upload-time = "2026-04-01T14:44:23.663Z" }, + { url = 
"https://files.pythonhosted.org/packages/3c/83/5982eb4a285967baa70340320be9f88e57665a387e3a53a7f0db8231a0cd/pillow-12.2.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d362d1878f00c142b7e1a16e6e5e780f02be8195123f164edf7eddd911eefe7c", size = 8126550, upload-time = "2026-04-01T14:44:26.772Z" }, + { url = "https://files.pythonhosted.org/packages/4e/48/6ffc514adce69f6050d0753b1a18fd920fce8cac87620d5a31231b04bfc5/pillow-12.2.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2c727a6d53cb0018aadd8018c2b938376af27914a68a492f59dfcaca650d5eea", size = 6433114, upload-time = "2026-04-01T14:44:29.615Z" }, + { url = "https://files.pythonhosted.org/packages/36/a3/f9a77144231fb8d40ee27107b4463e205fa4677e2ca2548e14da5cf18dce/pillow-12.2.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:efd8c21c98c5cc60653bcb311bef2ce0401642b7ce9d09e03a7da87c878289d4", size = 7115667, upload-time = "2026-04-01T14:44:32.773Z" }, + { url = "https://files.pythonhosted.org/packages/c1/fc/ac4ee3041e7d5a565e1c4fd72a113f03b6394cc72ab7089d27608f8aaccb/pillow-12.2.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9f08483a632889536b8139663db60f6724bfcb443c96f1b18855860d7d5c0fd4", size = 6538966, upload-time = "2026-04-01T14:44:35.252Z" }, + { url = "https://files.pythonhosted.org/packages/c0/a8/27fb307055087f3668f6d0a8ccb636e7431d56ed0750e07a60547b1e083e/pillow-12.2.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:dac8d77255a37e81a2efcbd1fc05f1c15ee82200e6c240d7e127e25e365c39ea", size = 7238241, upload-time = "2026-04-01T14:44:37.875Z" }, + { url = "https://files.pythonhosted.org/packages/ad/4b/926ab182c07fccae9fcb120043464e1ff1564775ec8864f21a0ebce6ac25/pillow-12.2.0-cp313-cp313t-win32.whl", hash = "sha256:ee3120ae9dff32f121610bb08e4313be87e03efeadfc6c0d18f89127e24d0c24", size = 6379592, upload-time = "2026-04-01T14:44:40.336Z" }, + { url = 
"https://files.pythonhosted.org/packages/c2/c4/f9e476451a098181b30050cc4c9a3556b64c02cf6497ea421ac047e89e4b/pillow-12.2.0-cp313-cp313t-win_amd64.whl", hash = "sha256:325ca0528c6788d2a6c3d40e3568639398137346c3d6e66bb61db96b96511c98", size = 7085542, upload-time = "2026-04-01T14:44:43.251Z" }, + { url = "https://files.pythonhosted.org/packages/00/a4/285f12aeacbe2d6dc36c407dfbbe9e96d4a80b0fb710a337f6d2ad978c75/pillow-12.2.0-cp313-cp313t-win_arm64.whl", hash = "sha256:2e5a76d03a6c6dcef67edabda7a52494afa4035021a79c8558e14af25313d453", size = 2465765, upload-time = "2026-04-01T14:44:45.996Z" }, + { url = "https://files.pythonhosted.org/packages/bf/98/4595daa2365416a86cb0d495248a393dfc84e96d62ad080c8546256cb9c0/pillow-12.2.0-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:3adc9215e8be0448ed6e814966ecf3d9952f0ea40eb14e89a102b87f450660d8", size = 4100848, upload-time = "2026-04-01T14:44:48.48Z" }, + { url = "https://files.pythonhosted.org/packages/0b/79/40184d464cf89f6663e18dfcf7ca21aae2491fff1a16127681bf1fa9b8cf/pillow-12.2.0-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:6a9adfc6d24b10f89588096364cc726174118c62130c817c2837c60cf08a392b", size = 4176515, upload-time = "2026-04-01T14:44:51.353Z" }, + { url = "https://files.pythonhosted.org/packages/b0/63/703f86fd4c422a9cf722833670f4f71418fb116b2853ff7da722ea43f184/pillow-12.2.0-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:6a6e67ea2e6feda684ed370f9a1c52e7a243631c025ba42149a2cc5934dec295", size = 3640159, upload-time = "2026-04-01T14:44:53.588Z" }, + { url = "https://files.pythonhosted.org/packages/71/e0/fb22f797187d0be2270f83500aab851536101b254bfa1eae10795709d283/pillow-12.2.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2bb4a8d594eacdfc59d9e5ad972aa8afdd48d584ffd5f13a937a664c3e7db0ed", size = 5312185, upload-time = "2026-04-01T14:44:56.039Z" }, + { url = 
"https://files.pythonhosted.org/packages/ba/8c/1a9e46228571de18f8e28f16fabdfc20212a5d019f3e3303452b3f0a580d/pillow-12.2.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:80b2da48193b2f33ed0c32c38140f9d3186583ce7d516526d462645fd98660ae", size = 4695386, upload-time = "2026-04-01T14:44:58.663Z" }, + { url = "https://files.pythonhosted.org/packages/70/62/98f6b7f0c88b9addd0e87c217ded307b36be024d4ff8869a812b241d1345/pillow-12.2.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:22db17c68434de69d8ecfc2fe821569195c0c373b25cccb9cbdacf2c6e53c601", size = 6280384, upload-time = "2026-04-01T14:45:01.5Z" }, + { url = "https://files.pythonhosted.org/packages/5e/03/688747d2e91cfbe0e64f316cd2e8005698f76ada3130d0194664174fa5de/pillow-12.2.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7b14cc0106cd9aecda615dd6903840a058b4700fcb817687d0ee4fc8b6e389be", size = 8091599, upload-time = "2026-04-01T14:45:04.5Z" }, + { url = "https://files.pythonhosted.org/packages/f6/35/577e22b936fcdd66537329b33af0b4ccfefaeabd8aec04b266528cddb33c/pillow-12.2.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8cbeb542b2ebc6fcdacabf8aca8c1a97c9b3ad3927d46b8723f9d4f033288a0f", size = 6396021, upload-time = "2026-04-01T14:45:07.117Z" }, + { url = "https://files.pythonhosted.org/packages/11/8d/d2532ad2a603ca2b93ad9f5135732124e57811d0168155852f37fbce2458/pillow-12.2.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4bfd07bc812fbd20395212969e41931001fd59eb55a60658b0e5710872e95286", size = 7083360, upload-time = "2026-04-01T14:45:09.763Z" }, + { url = "https://files.pythonhosted.org/packages/5e/26/d325f9f56c7e039034897e7380e9cc202b1e368bfd04d4cbe6a441f02885/pillow-12.2.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:9aba9a17b623ef750a4d11b742cbafffeb48a869821252b30ee21b5e91392c50", size = 6507628, upload-time = "2026-04-01T14:45:12.378Z" }, + { url = 
"https://files.pythonhosted.org/packages/5f/f7/769d5632ffb0988f1c5e7660b3e731e30f7f8ec4318e94d0a5d674eb65a4/pillow-12.2.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:deede7c263feb25dba4e82ea23058a235dcc2fe1f6021025dc71f2b618e26104", size = 7209321, upload-time = "2026-04-01T14:45:15.122Z" }, + { url = "https://files.pythonhosted.org/packages/6a/7a/c253e3c645cd47f1aceea6a8bacdba9991bf45bb7dfe927f7c893e89c93c/pillow-12.2.0-cp314-cp314-win32.whl", hash = "sha256:632ff19b2778e43162304d50da0181ce24ac5bb8180122cbe1bf4673428328c7", size = 6479723, upload-time = "2026-04-01T14:45:17.797Z" }, + { url = "https://files.pythonhosted.org/packages/cd/8b/601e6566b957ca50e28725cb6c355c59c2c8609751efbecd980db44e0349/pillow-12.2.0-cp314-cp314-win_amd64.whl", hash = "sha256:4e6c62e9d237e9b65fac06857d511e90d8461a32adcc1b9065ea0c0fa3a28150", size = 7217400, upload-time = "2026-04-01T14:45:20.529Z" }, + { url = "https://files.pythonhosted.org/packages/d6/94/220e46c73065c3e2951bb91c11a1fb636c8c9ad427ac3ce7d7f3359b9b2f/pillow-12.2.0-cp314-cp314-win_arm64.whl", hash = "sha256:b1c1fbd8a5a1af3412a0810d060a78b5136ec0836c8a4ef9aa11807f2a22f4e1", size = 2554835, upload-time = "2026-04-01T14:45:23.162Z" }, + { url = "https://files.pythonhosted.org/packages/b6/ab/1b426a3974cb0e7da5c29ccff4807871d48110933a57207b5a676cccc155/pillow-12.2.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:57850958fe9c751670e49b2cecf6294acc99e562531f4bd317fa5ddee2068463", size = 5314225, upload-time = "2026-04-01T14:45:25.637Z" }, + { url = "https://files.pythonhosted.org/packages/19/1e/dce46f371be2438eecfee2a1960ee2a243bbe5e961890146d2dee1ff0f12/pillow-12.2.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:d5d38f1411c0ed9f97bcb49b7bd59b6b7c314e0e27420e34d99d844b9ce3b6f3", size = 4698541, upload-time = "2026-04-01T14:45:28.355Z" }, + { url = 
"https://files.pythonhosted.org/packages/55/c3/7fbecf70adb3a0c33b77a300dc52e424dc22ad8cdc06557a2e49523b703d/pillow-12.2.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5c0a9f29ca8e79f09de89293f82fc9b0270bb4af1d58bc98f540cc4aedf03166", size = 6322251, upload-time = "2026-04-01T14:45:30.924Z" }, + { url = "https://files.pythonhosted.org/packages/1c/3c/7fbc17cfb7e4fe0ef1642e0abc17fc6c94c9f7a16be41498e12e2ba60408/pillow-12.2.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:1610dd6c61621ae1cf811bef44d77e149ce3f7b95afe66a4512f8c59f25d9ebe", size = 8127807, upload-time = "2026-04-01T14:45:33.908Z" }, + { url = "https://files.pythonhosted.org/packages/ff/c3/a8ae14d6defd2e448493ff512fae903b1e9bd40b72efb6ec55ce0048c8ce/pillow-12.2.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a34329707af4f73cf1782a36cd2289c0368880654a2c11f027bcee9052d35dd", size = 6433935, upload-time = "2026-04-01T14:45:36.623Z" }, + { url = "https://files.pythonhosted.org/packages/6e/32/2880fb3a074847ac159d8f902cb43278a61e85f681661e7419e6596803ed/pillow-12.2.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8e9c4f5b3c546fa3458a29ab22646c1c6c787ea8f5ef51300e5a60300736905e", size = 7116720, upload-time = "2026-04-01T14:45:39.258Z" }, + { url = "https://files.pythonhosted.org/packages/46/87/495cc9c30e0129501643f24d320076f4cc54f718341df18cc70ec94c44e1/pillow-12.2.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:fb043ee2f06b41473269765c2feae53fc2e2fbf96e5e22ca94fb5ad677856f06", size = 6540498, upload-time = "2026-04-01T14:45:41.879Z" }, + { url = "https://files.pythonhosted.org/packages/18/53/773f5edca692009d883a72211b60fdaf8871cbef075eaa9d577f0a2f989e/pillow-12.2.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:f278f034eb75b4e8a13a54a876cc4a5ab39173d2cdd93a638e1b467fc545ac43", size = 7239413, upload-time = "2026-04-01T14:45:44.705Z" }, + { url = 
"https://files.pythonhosted.org/packages/c9/e4/4b64a97d71b2a83158134abbb2f5bd3f8a2ea691361282f010998f339ec7/pillow-12.2.0-cp314-cp314t-win32.whl", hash = "sha256:6bb77b2dcb06b20f9f4b4a8454caa581cd4dd0643a08bacf821216a16d9c8354", size = 6482084, upload-time = "2026-04-01T14:45:47.568Z" }, + { url = "https://files.pythonhosted.org/packages/ba/13/306d275efd3a3453f72114b7431c877d10b1154014c1ebbedd067770d629/pillow-12.2.0-cp314-cp314t-win_amd64.whl", hash = "sha256:6562ace0d3fb5f20ed7290f1f929cae41b25ae29528f2af1722966a0a02e2aa1", size = 7225152, upload-time = "2026-04-01T14:45:50.032Z" }, + { url = "https://files.pythonhosted.org/packages/ff/6e/cf826fae916b8658848d7b9f38d88da6396895c676e8086fc0988073aaf8/pillow-12.2.0-cp314-cp314t-win_arm64.whl", hash = "sha256:aa88ccfe4e32d362816319ed727a004423aab09c5cea43c01a4b435643fa34eb", size = 2556579, upload-time = "2026-04-01T14:45:52.529Z" }, + { url = "https://files.pythonhosted.org/packages/4e/b7/2437044fb910f499610356d1352e3423753c98e34f915252aafecc64889f/pillow-12.2.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0538bd5e05efec03ae613fd89c4ce0368ecd2ba239cc25b9f9be7ed426b0af1f", size = 5273969, upload-time = "2026-04-01T14:45:55.538Z" }, + { url = "https://files.pythonhosted.org/packages/f6/f4/8316e31de11b780f4ac08ef3654a75555e624a98db1056ecb2122d008d5a/pillow-12.2.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:394167b21da716608eac917c60aa9b969421b5dcbbe02ae7f013e7b85811c69d", size = 4659674, upload-time = "2026-04-01T14:45:58.093Z" }, + { url = "https://files.pythonhosted.org/packages/d4/37/664fca7201f8bb2aa1d20e2c3d5564a62e6ae5111741966c8319ca802361/pillow-12.2.0-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5d04bfa02cc2d23b497d1e90a0f927070043f6cbf303e738300532379a4b4e0f", size = 5288479, upload-time = "2026-04-01T14:46:01.141Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/62/5b0ed78fce87346be7a5cfcfaaad91f6a1f98c26f86bdbafa2066c647ef6/pillow-12.2.0-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0c838a5125cee37e68edec915651521191cef1e6aa336b855f495766e77a366e", size = 7032230, upload-time = "2026-04-01T14:46:03.874Z" }, + { url = "https://files.pythonhosted.org/packages/c3/28/ec0fc38107fc32536908034e990c47914c57cd7c5a3ece4d8d8f7ffd7e27/pillow-12.2.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4a6c9fa44005fa37a91ebfc95d081e8079757d2e904b27103f4f5fa6f0bf78c0", size = 5355404, upload-time = "2026-04-01T14:46:06.33Z" }, + { url = "https://files.pythonhosted.org/packages/5e/8b/51b0eddcfa2180d60e41f06bd6d0a62202b20b59c68f5a132e615b75aecf/pillow-12.2.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:25373b66e0dd5905ed63fa3cae13c82fbddf3079f2c8bf15c6fb6a35586324c1", size = 6002215, upload-time = "2026-04-01T14:46:08.83Z" }, + { url = "https://files.pythonhosted.org/packages/bc/60/5382c03e1970de634027cee8e1b7d39776b778b81812aaf45b694dfe9e28/pillow-12.2.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:bfa9c230d2fe991bed5318a5f119bd6780cda2915cca595393649fc118ab895e", size = 7080946, upload-time = "2026-04-01T14:46:11.734Z" }, ] [[package]] @@ -2018,20 +2024,20 @@ wheels = [ [[package]] name = "python-dotenv" -version = "1.1.1" +version = "1.2.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/f6/b0/4bc07ccd3572a2f9df7e6782f52b0c6c90dcbb803ac4a167702d7d0dfe1e/python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab", size = 41978, upload-time = "2025-06-24T04:21:07.341Z" } +sdist = { url = "https://files.pythonhosted.org/packages/82/ed/0301aeeac3e5353ef3d94b6ec08bbcabd04a72018415dcb29e588514bba8/python_dotenv-1.2.2.tar.gz", hash = 
"sha256:2c371a91fbd7ba082c2c1dc1f8bf89ca22564a087c2c287cd9b662adde799cf3", size = 50135, upload-time = "2026-03-01T16:00:26.196Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload-time = "2025-06-24T04:21:06.073Z" }, + { url = "https://files.pythonhosted.org/packages/0b/d7/1959b9648791274998a9c3526f6d0ec8fd2233e4d4acce81bbae76b44b2a/python_dotenv-1.2.2-py3-none-any.whl", hash = "sha256:1d8214789a24de455a8b8bd8ae6fe3c6b69a5e3d64aa8a8e5d68e694bbcb285a", size = 22101, upload-time = "2026-03-01T16:00:25.09Z" }, ] [[package]] name = "python-multipart" -version = "0.0.22" +version = "0.0.26" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/94/01/979e98d542a70714b0cb2b6728ed0b7c46792b695e3eaec3e20711271ca3/python_multipart-0.0.22.tar.gz", hash = "sha256:7340bef99a7e0032613f56dc36027b959fd3b30a787ed62d310e951f7c3a3a58", size = 37612, upload-time = "2026-01-25T10:15:56.219Z" } +sdist = { url = "https://files.pythonhosted.org/packages/88/71/b145a380824a960ebd60e1014256dbb7d2253f2316ff2d73dfd8928ec2c3/python_multipart-0.0.26.tar.gz", hash = "sha256:08fadc45918cd615e26846437f50c5d6d23304da32c341f289a617127b081f17", size = 43501, upload-time = "2026-04-10T14:09:59.473Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1b/d0/397f9626e711ff749a95d96b7af99b9c566a9bb5129b8e4c10fc4d100304/python_multipart-0.0.22-py3-none-any.whl", hash = "sha256:2b2cd894c83d21bf49d702499531c7bafd057d730c201782048f7945d82de155", size = 24579, upload-time = "2026-01-25T10:15:54.811Z" }, + { url = "https://files.pythonhosted.org/packages/9a/22/f1925cdda983ab66fc8ec6ec8014b959262747e58bdca26a4e3d1da29d56/python_multipart-0.0.26-py3-none-any.whl", hash = 
"sha256:c0b169f8c4484c13b0dcf2ef0ec3a4adb255c4b7d18d8e420477d2b1dd03f185", size = 28847, upload-time = "2026-04-10T14:09:58.131Z" }, ] [[package]] @@ -2309,14 +2315,14 @@ wheels = [ [[package]] name = "s3transfer" -version = "0.16.0" +version = "0.17.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "botocore" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/05/04/74127fc843314818edfa81b5540e26dd537353b123a4edc563109d8f17dd/s3transfer-0.16.0.tar.gz", hash = "sha256:8e990f13268025792229cd52fa10cb7163744bf56e719e0b9cb925ab79abf920", size = 153827, upload-time = "2025-12-01T02:30:59.114Z" } +sdist = { url = "https://files.pythonhosted.org/packages/9b/ec/7c692cde9125b77e84b307354d4fb705f98b8ccad59a036d5957ca75bfc3/s3transfer-0.17.0.tar.gz", hash = "sha256:9edeb6d1c3c2f89d6050348548834ad8289610d886e5bf7b7207728bd43ce33a", size = 155337, upload-time = "2026-04-29T22:07:36.33Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/fc/51/727abb13f44c1fcf6d145979e1535a35794db0f6e450a0cb46aa24732fe2/s3transfer-0.16.0-py3-none-any.whl", hash = "sha256:18e25d66fed509e3868dc1572b3f427ff947dd2c56f844a5bf09481ad3f3b2fe", size = 86830, upload-time = "2025-12-01T02:30:57.729Z" }, + { url = "https://files.pythonhosted.org/packages/87/72/c6c32d2b657fa3dad1de340254e14390b1e334ce38268b7ad51abda3c8c2/s3transfer-0.17.0-py3-none-any.whl", hash = "sha256:ce3801712acf4ad3e89fb9990df97b4972e93f4b3b0004d214be5bce12814c20", size = 86811, upload-time = "2026-04-29T22:07:34.966Z" }, ] [[package]]